diff --git a/.flake8 b/.flake8
new file mode 100644
index 0000000000..4f67c422f8
--- /dev/null
+++ b/.flake8
@@ -0,0 +1,49 @@
+[flake8]
+# References:
+# https://flake8.readthedocs.io/en/latest/user/configuration.html
+# https://flake8.readthedocs.io/en/latest/user/error-codes.html
+# https://pycodestyle.readthedocs.io/en/latest/intro.html#error-codes
+
+max-line-length = 80
+max-complexity = 50
+select = C,E,F,W,B,B950
+ignore =
+    # E203: whitespace before ':'
+    E203,
+    # E226: missing whitespace around arithmetic operator
+    E226,
+    # E231: missing whitespace after ',', ';', or ':'
+    E231,
+    # E402: module level import not at top of file
+    E402,
+    # E501: line too long
+    E501,
+    # E731: do not assign a lambda expression, use a def
+    E731,
+    # W503: line break before binary operator
+    W503,
+    # W504: line break after binary operator
+    W504,
+exclude =
+    #
+    # ignore the following directories
+    #
+    .eggs,
+    build,
+    docs/src/sphinxext/*,
+    tools/*,
+    benchmarks/*,
+    #
+    # ignore auto-generated files
+    #
+    _ff_cross_references.py,
+    std_names.py,
+    um_cf_map.py,
+    #
+    # ignore third-party files
+    #
+    gitwash_dumper.py,
+    #
+    # convenience imports
+    #
+    lib/iris/common/__init__.py
diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs
new file mode 100644
index 0000000000..f6cae020f3
--- /dev/null
+++ b/.git-blame-ignore-revs
@@ -0,0 +1,20 @@
+# Format: numpy array format (#5235)
+c18dcd8dafef0cc7bbbf80dfce66f76a46ce59c5
+
+# style: flake8 (#3755)
+7c86bc0168684345dc475457b1a77dadc77ce9bb
+
+# style: black (#3518)
+ffcfad475e0593e1e40895453cf1df154e5f6f2c
+
+# style: isort (#4174)
+15bbcc5ac3d539cb6e820148b66e7cf55d91c5d2
+
+# style: blacken-docs (#4205)
+1572e180243e492d8ff76fa8cdefb82ef6f90415
+
+# style: sort-all (#4353)
+64705dbc40881233aae45f051d96049150369e53
+
+# style: codespell (#5186)
+417aa6bbd9b10d25cad7def54d47ef4d718bc38d
diff --git a/.git_archival.txt b/.git_archival.txt
new file mode 100644
index 0000000000..3994ec0a83
--- /dev/null
+++ b/.git_archival.txt
@@ -0,0 +1,4 @@
+node: $Format:%H$
+node-date: $Format:%cI$
+describe-name: $Format:%(describe:tags=true)$
+ref-names: $Format:%D$
diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 0000000000..82bf71c1c5
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1 @@
+.git_archival.txt export-subst
\ No newline at end of file
diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md
index 425a427357..594c45a1ee 100644
--- a/.github/CONTRIBUTING.md
+++ b/.github/CONTRIBUTING.md
@@ -1,36 +1,13 @@
-How to contribute
-=================
+# Contributing to Iris
-We want Iris to be driven by the community - your contributions are
-invaluable to us! This page lists the guidelines for contributors which
-will help ease the process of getting your hard work accepted into Iris,
-and shared back to the world.
+Want to contribute to Iris, but don't know where to start? 🤔
-Getting started
---------------
+We recommend that you first check out our advice to [First Time Contributors](https://github.com/SciTools/iris/issues/4133),
+which has some solid suggestions on where to begin.
-1. If you've not already got one, sign up for a
-   [GitHub account](https://github.com/signup/free).
-1. Fork the Iris repository, create your new fix/feature branch, and
-   start commiting code.
-    - Our
-      [development guide](http://scitools.org.uk/iris/docs/latest/developers_guide/gitwash/git_development.html)
-      has more detail.
-1. Make sure you've added appropriate tests, and that *all* the tests
-   pass.
+Otherwise, head over to our [Developers Guide on Getting Involved](https://scitools-iris.readthedocs.io/en/stable/developers_guide/contributing_getting_involved.html)
+for all the information you need to start on your Iris development journey.
+Still need help or advice?
-Submitting changes
------------------
-
-1. Read and sign the Contributor Licence Agreement (CLA).
-    - See our [governance page](http://scitools.org.uk/governance.html)
-      for the CLA and what to do with it.
-1. Push your branch to your fork of Iris.
-1. Submit your pull request.
-1. Chillax.
-
-
-If in doubt, please
-[contact us](https://groups.google.com/forum/?fromgroups=#!forum/scitools-iris)
-on our Google Group, and we'll be happy to help you.
+Then join us over on our [Iris GitHub Discussions](https://github.com/SciTools/iris/discussions). We'd love to hear from you!
\ No newline at end of file
diff --git a/.github/ISSUE_TEMPLATE/bug-report.md b/.github/ISSUE_TEMPLATE/bug-report.md
new file mode 100644
index 0000000000..134b6ff8da
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/bug-report.md
@@ -0,0 +1,39 @@
+---
+name: "\U0001F41B Bug Report"
+about: Submit a bug report to help us improve Iris
+title: ''
+labels: 'Type: Bug'
+assignees: ''
+
+---
+
+## 🐛 Bug Report
+
+
+## How To Reproduce
+Steps to reproduce the behaviour:
+
+1.
+2.
+3.
+
+## Expected behaviour
+
+
+## Screenshots
+
+
+## Environment
+ - OS & Version: [e.g., Ubuntu 20.04 LTS]
+ - Iris Version: [e.g., From the command line run `python -c "import iris; print(iris.__version__)"`]
+
+## Additional context
+
+<details>
+<summary>Click to expand this section...</summary>
+
+```
+Please add additional verbose information in this section e.g., code, output, tracebacks, screenshots etc.
+```
+</details>
+
diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml
new file mode 100644
index 0000000000..84af305034
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/config.yml
@@ -0,0 +1,9 @@
+# reference: https://docs.github.com/en/github/building-a-strong-community/configuring-issue-templates-for-your-repository#configuring-the-template-chooser
+blank_issues_enabled: false
+contact_links:
+  - name: 💬 Iris GitHub Discussions
+    url: https://github.com/SciTools/iris/discussions
+    about: Engage with the Iris community to discuss your issue
+  - name: ❓ Usage Question
+    url: https://github.com/SciTools/iris/discussions/categories/q-a
+    about: Raise a question about how to use Iris in the Q&A section of Discussions
diff --git a/.github/ISSUE_TEMPLATE/documentation.md b/.github/ISSUE_TEMPLATE/documentation.md
new file mode 100644
index 0000000000..01eb2a6734
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/documentation.md
@@ -0,0 +1,12 @@
+---
+name: "\U0001F4DA Documentation"
+about: Report an issue with the Iris documentation
+title: ''
+labels: 'Type: Documentation'
+assignees: ''
+
+---
+
+## 📚 Documentation
+
+
diff --git a/.github/ISSUE_TEMPLATE/feature-request.md b/.github/ISSUE_TEMPLATE/feature-request.md
new file mode 100644
index 0000000000..2f66321405
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/feature-request.md
@@ -0,0 +1,25 @@
+---
+name: "✨ Feature Request"
+about: Submit a request for a new feature in Iris
+title: ''
+assignees: ''
+
+---
+
+## ✨ Feature Request
+
+
+## Motivation
+
+
+
+## Additional context
+
+<details>
+<summary>Click to expand this section...</summary>
+
+```
+Please add additional verbose information in this section e.g., references, screenshots, listings etc.
+```
+</details>
+ diff --git a/.github/ISSUE_TEMPLATE/issue.md b/.github/ISSUE_TEMPLATE/issue.md new file mode 100644 index 0000000000..63de163743 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/issue.md @@ -0,0 +1,10 @@ +--- +name: "\U0001F4F0 Custom Issue" +about: Submit a generic issue to help us improve Iris +title: '' +assignees: '' + +--- + +## 📰 Custom Issue + diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 0000000000..82ba80c4ff --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,20 @@ +# Reference: +# - https://docs.github.com/en/code-security/supply-chain-security/keeping-your-dependencies-updated-automatically/keeping-your-actions-up-to-date-with-dependabot +# - https://docs.github.com/en/code-security/supply-chain-security/keeping-your-dependencies-updated-automatically/configuration-options-for-dependency-updates + +version: 2 +updates: + + - package-ecosystem: "github-actions" + directories: + - "/" + - "/.github/workflows/composite/*" + schedule: + # Check later in the week - the upstream dependabot check in `workflows` runs deliberately early in the week. + # Therefore allowing time for the `workflows` update to be merged-and-released first. + interval: "weekly" + day: "thursday" + time: "01:00" + timezone: "Europe/London" + labels: + - "Bot" diff --git a/.github/deploy_key.scitools-docs.enc b/.github/deploy_key.scitools-docs.enc deleted file mode 100644 index 165a7c1970..0000000000 --- a/.github/deploy_key.scitools-docs.enc +++ /dev/null @@ -1 +0,0 @@ -gAAAAABZSMeGIlHxHu4oCV_h8shbCRf1qJYoLO9Z0q9uKRDTlytoigzlvfxhN-9WMjc3Js1f1Zg55PfEpTOpL82p6QHF-gqW0k0qGjanO3lnQzM6EzIu3KyJPrVrL-O6edwoPMYKqwsNO3VQHNuEspsFKY0TbjnTPHc45SPU5LjEGX4c_SADSDcLDJm2rbrU2eVkT-gFHy_-ZzK0Di83WlDc79YzIkVe5BAn5PbWv3O9BROR4fJzecbjmWRT_rp1cqI_gaUpVcwTdRK3II9YnazBtW4h2WbCeTcySLD7N4o9K0P71SR6gG_XFbpML3Haf5IUdRi0qPBuvJ_4YVnnuJo6mhiIOJfUEcNj_bbLOYVzPmKyQMHvrPf_lK5JhdX6MUvqluhqHuc0i_z_j1O2y32lB7b1iiY6eE_BsNlXJHlOX1GiXkX0nZLI48p-D22jya44WshWSnVcoalcCDkdbvdFbpOscwXDR3nB-PCOmRUF_d1BlMbp1if-VP0yt3tJ_5yyCrqSRWwFusaibQTF6yoImetl7Am95hh2FjFDNkalHqtarnUv86w-26v1ukcTIjJ0iHzNbCK1m0VMkvE6uDeqRgIZnVKON5cesmM3YbulRrHpaOiSly_sMhLhfg5jTxAuOa319AQGoHEOcRLRUYdw2TQkDEiHGiUh_U4-nC7GTGDGcXyeBIa4ciuC2Qi0QXf9qyEGoIRcU8BP34LDNdtovJoZOBDzhr5Ajnu7yA3GB3TD_kiZrgm6agFuu7a51OMfjezhwGzUJ4X-empPctwm9woOJmPCTFqCvxB2VwVV0L6yngsTooyAHCi5st_AG-p5FIT3VZGx7EgCd68ze9XlRoACoe9XOdSFklbaSMGRbJlvKCPAA0zj4__PfIhlD8Cxwwjq_VXlSr_QxygIGZJlhkT46P9TroolgdipaBp1aQ3_PKHfgw5Y9ZqBKCZF5DOJejqUbfVKUp2JdqoX3yQBD0ByQFdfCuLvoiYcM2ofKdIMvel3Jwn0Nx4NYR2qg3h7FYti0jdrNlC89gnL4tKsf0DAGxZ1UYmqQMWJ3-GKCKrlKyeaHYB2djPRGP8VeoRZh_UorSNHU56KSztK_hTP6P0nFymRJRUSRBMKTaTfJf1aBlk9zJHSe9hOKwxyUNkwcTftGn5P0WNcnaTk3ecTVe-1QJKbPWwMBDzqQtTCsCizgN4UdQsmy4iMYq-LT2TC-JXXo0CPTNDybUj92wSa7KeKTvKnbN8DMZbGRdgy5BOSGw4hMIoIFSB-6tnBIvTntNfMT9ac9e9jKm47Q4qXpaeF3AsvBqxkMRQLaYVppPng6cA49VjJQDZ0gTdPKSSKZkApfeeQL0LLCGwzQ4C52TWK2NJSQ3pvRYI1F0taDQWopIiwFfox-OSYnOJECHkHjxaxhHQzVb3w47xKKZNXbLb-LV7QI-kGuKLfoqO1lq94cw1H-EVrXaGJcDDLjK2jRgdVfDyPsHMcW1oUDJqu8gQ6fCXYPbqJzdmFNFsc1hywHWCU7crV61D2QubwzbLRnP8053MvsMnbdhWtwocTlvvdG-qW6CiEA9Eanfpf0RW1W9oh6yQJ__0vS9UWswqq5ahkkpHY9LTE0US4L3xbFOrq7HgbA2jelTdPVfxo3BfUHuL8oKpFDTzgZi07gNmkhIZfpuXj2KFnm9XM31AsY6V2rXL0xSx-9rvi4FP0LK6V5vQ8OKI8aRPCDyzLUv2xnayMW4yaYg3GHD5yo7pIOswKc6GOEmetPnay3j0dVN3hfpkpfJWhss3vjZ2Zl0NmjJ7OuS25tjUGLy82A1yFSpL8mKRkHZJuMDZbd_Or6gaPVoVT_Otbkh-6pMZuDeOHOUfgey0Z374jCjRpyQ9k-Fpw8ykow8iIIQ088kC5CeQy6jRhD7mO3iR4-U1XKDJQNlNg1z_JYyDrwykp7FFN2sQn7RRYHIXx2iMrEDXdrdTrujMFN6omC13yDuXJukAgZb6zBBUTlonxRUBjUJWt2P-1sRRTsG8mr9EaE5K-xhR5Ust_37L3
svNQ0vwLtPLIpWGZHhD8P_dYNR2RL4679xyzI8A7wLY82wFBHrcghAd4UtLJH9ul6IuS_CaVo-gbfowNRaQ0Zw7WHZGIXpZWEx1_zck6qDEaCY8TpQeciBWpH5uJDSYqdLdMwigdQEGzAJ1DHSWsyTrmOR7Lhwi9WqOzfWe4ahxAkAUH_Jdr_i-nGfl_x3OgQdHM7jWVMXDcXEmR0bkw-s0EKXCn20q2bxDkm5SUWkYtWAZ2aZRgo4wHOqGBcP99xZ25mq9uxtNOkLBF81lnVbn_4BAZBNnnKwwj4SafeIW4KR1ZOpnEI47sGUR6NhEk9VtJsv0zeZIv8VjRbNLh3QCxkNMue60SjJ48kjotZSX1RQJN0xwPftiABBf8MX9tyZe8emQvPeIcdQTSQPnYEUx22xZGeeJTNrZ9soQyP6mrkkRihp6o9tG7HT9QEVLGM19wAigwAAMMXGqdGzWwpar30JtJU94gAmIlwFUJqeO_fdJKFspnUyJ6gt5_oHsKNEV7Uz5EJwGpa94tlPJXjvZpu-wWQfu8U0trTU2mTCA0bmZIDID-Xk4vCW_SD4OVnsvWyga4QHSg3AqVTjnjlapAjsYcFjiOo2C_U3besloprpyuAwpTdn7zdfMHIJO0ckBFnXlk8XB3kT0YGrCpBvW6gYMXlnePVcr3wJehCvMg1Q9Dc5fVQUqt65zcjbgiudfzFGtTe9T4f1IttoAtrJgTN4W1mtbZzSK864I_ngaX5YWgZSinjkbocCCFEJDcbiXMnV7OWOZefqW6VZu4BZKEKlN9k2kH3UCECCK3uRAQIPn_48DgaVnAff2-fMADltiosSPJ_a3057acJP0cf-1QsJuV7r3zdzL3shgrMRjpSsSTCYdMhZ6disFGcJg7hJJvtH1FieZ76jps5FYi5lE8Ua9yBKlG4dCGuUBnikvpfy2FLMLFNn-iXLflu2oiBbcLvn_ReZUnFIR6KgGRN8xKEBaXATQVtb2E678GtQptK8PHP2DoAtbsIXUDn60YH04D9pEck8NnmWYAz7sWbiL6OKdaO7jQep4mt3CgkyFC0NCKP9zCbVNtmfHRVmHtckjgfHF-tK_v59KeAuwWPtm7ow2BjynAK42IGR9nWtQFRUZIboaND8UF76YGKFF7kOf_XTvoNrVTCRkD6b8KJy2IFfdoHP6WET9QLvwDSXgYLPlCX9z7aQ_lc57u5d_dGO-7NZ_Qbs69ByyIvQoztVBjw6fa7EzSwccqPfMQL_fiecNCng-r4gHaH6TlgSbfqQOISHxTtvmbym1no560ZsHfnQfuL6BCI8s6OoygxhOnQhaDqyOUVBut_x3VR_DKFMyUazXYNgLbRsdITaAvR-0gIx5TAX9n3A4HwHuiBZCtwRYaiJnW8FX9lk1Y_g5UHL2OC3rsNFui3aBLzAFhx58lALxnxhlUItuHHK9BgexnR2yCj2nOWLoWQzfFaf2_fpjEh_QBHTqUxdQZ8ighg_8lh6hmLbW4PcUxKX71RFmikLyS3-idlzsiEomNlPNaVllRF21vE6dR-nZ6xsxzTvNB4wumP2irQ9mFBTN1WpiLMyNoEEucA2I848YHUfkZrjTG_dcCQNp7H_2gKdIsZ135lUEG6lYfhLMHTmP5uYxxx3Pipjp6wF2GFCsZPIlIPsgrhbSxqkWg1EOViHtpw6ypFKn7wQHHfnrnHkFWnrKbMARVBjJUB-FhK4b6qLU_k_MTMipemneMUFXlj3EkEhKM18MIHGkIOkwG5QtPYcjUAf_2sZlxSMVnh6sQ8kVwF6lfk_l8jhoO93HUTntZUSv7GrE3s80yJgII4Qw37AdgcJiAkoPn1-17HfSsAy6uRh5-OvrCtkDqQxfuJSyn_4pRMh6hZT7N9pI5limMXXn2nHnxU93UT3qU-smA8q0ECfvK3JwoaYy_llSx0wSBvpmxjLQ302sFYM5FVZ9zRbHuLCCZShVopiyMDLHVJe_1g9Ou1KL-h6RVZgg3Ttyb5m2KDfoHEVLeZkW81YLCsyo7uNb6SVRM-615TIVGT6Eq7oJ6wO2LMDKjEpHKFiOFpY2fpR8noM81UqgLddYfl_lei7RVjaNO98otqE4iSNtpgJgyhAx4CdYm__yQRSXhckR4K7yAhM9Kh5BLbQQnf2_0WS1sWTmNMZZNMfOSqmTCRVwcYvg4TDGOA-vZARbZW1M7npVMldV_SbvgcEZD6InY9c40eheRqS0YD2W2HEZIiNeLRw0y5WBcYuJIpXhI3ViTXx-frJnv0Mo9uwmuLbJmWFcn6RdIVcU68_oPZZlZD4Vm7SjikbuZKF1BF3lXamTTDIBcWiDLwuNDv2lUkURDCWa5WJsfUCfTAJ6PTe8= \ No newline at end of file diff --git a/.github/labeler.yml b/.github/labeler.yml new file mode 100644 index 0000000000..14e9a95d30 --- /dev/null +++ b/.github/labeler.yml @@ -0,0 +1,9 @@ +# benchmark_this automatically triggers the benchmark workflow when added by +# a user. No triggering happens when GitHub Actions adds the label (this +# avoids security vulnerabilities), so alternative triggers for the below +# files are therefore included in workflows/benchmarks_run.yml. Automatic +# labelling is still included here to make it easier to search pull requests, +# and to reinforce the culture of using this label. 
+benchmark_this: +- changed-files: + - any-glob-to-any-file: ['requirements/locks/*.lock', "setup.py"] diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md new file mode 100644 index 0000000000..34bc59182c --- /dev/null +++ b/.github/pull_request_template.md @@ -0,0 +1,14 @@ +## 🚀 Pull Request + +### Description + + + + +--- +[Consult Iris pull request check list]( https://scitools-iris.readthedocs.io/en/latest/developers_guide/contributing_pull_request_checklist.html) + +--- +Add any of the below labels to trigger actions on this PR: + +- https://github.com/SciTools/iris/labels/benchmark_this diff --git a/.github/workflows/benchmarks_report.yml b/.github/workflows/benchmarks_report.yml new file mode 100644 index 0000000000..93a2bc1a77 --- /dev/null +++ b/.github/workflows/benchmarks_report.yml @@ -0,0 +1,83 @@ +# Post any reports generated by benchmarks_run.yml . +# Separated for security: +# https://securitylab.github.com/research/github-actions-preventing-pwn-requests/ + +name: benchmarks-report +run-name: Report benchmark results + +on: + workflow_run: + workflows: [benchmarks-run] + types: + - completed + +jobs: + download: + runs-on: ubuntu-latest + outputs: + reports_exist: ${{ steps.unzip.outputs.reports_exist }} + steps: + - name: Download artifact + id: download-artifact + # https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows#using-data-from-the-triggering-workflow + uses: actions/github-script@v7 + with: + script: | + let allArtifacts = await github.rest.actions.listWorkflowRunArtifacts({ + owner: context.repo.owner, + repo: context.repo.repo, + run_id: context.payload.workflow_run.id, + }); + let matchArtifact = allArtifacts.data.artifacts.filter((artifact) => { + return artifact.name == "benchmark_reports" + })[0]; + if (typeof matchArtifact != 'undefined') { + let download = await github.rest.actions.downloadArtifact({ + owner: context.repo.owner, + repo: context.repo.repo, + artifact_id: matchArtifact.id, + archive_format: 'zip', + }); + let fs = require('fs'); + fs.writeFileSync(`${process.env.GITHUB_WORKSPACE}/benchmark_reports.zip`, Buffer.from(download.data)); + }; + + - name: Unzip artifact + id: unzip + run: | + if test -f "benchmark_reports.zip"; then + reports_exist=1 + unzip benchmark_reports.zip -d benchmark_reports + else + reports_exist=0 + fi + echo "reports_exist=$reports_exist" >> "$GITHUB_OUTPUT" + + - name: Store artifact + uses: actions/upload-artifact@v4 + with: + name: benchmark_reports + path: benchmark_reports + + post_reports: + runs-on: ubuntu-latest + needs: download + if: needs.download.outputs.reports_exist == 1 + steps: + - name: Checkout repo + uses: actions/checkout@v4 + + - name: Download artifact + uses: actions/download-artifact@v4 + with: + name: benchmark_reports + path: .github/workflows/benchmark_reports + + - name: Set up Python + # benchmarks/bm_runner.py only needs builtins to run. + uses: actions/setup-python@v5 + + - name: Post reports + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: python benchmarks/bm_runner.py _gh_post diff --git a/.github/workflows/benchmarks_run.yml b/.github/workflows/benchmarks_run.yml new file mode 100644 index 0000000000..287735c335 --- /dev/null +++ b/.github/workflows/benchmarks_run.yml @@ -0,0 +1,157 @@ +# Use ASV to check for performance regressions, either: +# - In the last 24 hours' commits. +# - Introduced by this pull request. + +name: benchmarks-run +run-name: Run benchmarks + +on: + schedule: + # Runs every day at 23:00. 
+ - cron: "0 23 * * *" + workflow_dispatch: + inputs: + first_commit: + description: "First commit to benchmark (see bm_runner.py > Overnight)." + required: false + type: string + pull_request: + # Add the `labeled` type to the default list. + types: [labeled, opened, synchronize, reopened] + +jobs: + pre-checks: + runs-on: ubuntu-latest + if: github.repository == 'SciTools/iris' + outputs: + overnight: ${{ steps.overnight.outputs.check }} + branch: ${{ steps.branch.outputs.check }} + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 2 + - id: files-changed + uses: marceloprado/has-changed-path@df1b7a3161b8fb9fd8c90403c66a9e66dfde50cb + with: + # SEE ALSO .github/labeler.yml . + paths: requirements/locks/*.lock setup.py + - id: overnight + if: github.event_name != 'pull_request' + run: echo "check=true" >> "$GITHUB_OUTPUT" + - id: branch + if: > + github.event_name == 'pull_request' + && + ( + steps.files-changed.outputs.changed == 'true' + || + github.event.label.name == 'benchmark_this' + ) + run: echo "check=true" >> "$GITHUB_OUTPUT" + + + benchmark: + runs-on: ubuntu-latest + needs: pre-checks + if: > + needs.pre-checks.outputs.overnight == 'true' || + needs.pre-checks.outputs.branch == 'true' + + env: + IRIS_TEST_DATA_LOC_PATH: benchmarks + IRIS_TEST_DATA_PATH: benchmarks/iris-test-data + IRIS_TEST_DATA_VERSION: "2.28" + # Lets us manually bump the cache to rebuild + ENV_CACHE_BUILD: "0" + TEST_DATA_CACHE_BUILD: "2" + + steps: + # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Install Nox + run: | + pip install nox + + - name: Cache environment directories + id: cache-env-dir + uses: actions/cache@v4 + with: + path: | + .nox + benchmarks/.asv/env + $CONDA/pkgs + key: ${{ runner.os }}-${{ hashFiles('requirements/') }}-${{ env.ENV_CACHE_BUILD }} + + - name: Cache test data directory + id: cache-test-data + uses: actions/cache@v4 + with: + path: | + ${{ env.IRIS_TEST_DATA_PATH }} + key: + test-data-${{ env.IRIS_TEST_DATA_VERSION }}-${{ env.TEST_DATA_CACHE_BUILD }} + + - name: Fetch the test data + if: steps.cache-test-data.outputs.cache-hit != 'true' + run: | + wget --quiet https://github.com/SciTools/iris-test-data/archive/v${IRIS_TEST_DATA_VERSION}.zip -O iris-test-data.zip + unzip -q iris-test-data.zip + mkdir --parents ${GITHUB_WORKSPACE}/${IRIS_TEST_DATA_LOC_PATH} + mv iris-test-data-${IRIS_TEST_DATA_VERSION} ${GITHUB_WORKSPACE}/${IRIS_TEST_DATA_PATH} + + - name: Set test data var + run: | + echo "OVERRIDE_TEST_DATA_REPOSITORY=${GITHUB_WORKSPACE}/${IRIS_TEST_DATA_PATH}/test_data" >> $GITHUB_ENV + + - name: Benchmark this pull request + if: needs.pre-checks.outputs.branch == 'true' + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + PR_NUMBER: ${{ github.event.number }} + run: | + nox -s benchmarks -- branch origin/${{ github.base_ref }} + + - name: Run overnight benchmarks + id: overnight + if: needs.pre-checks.outputs.overnight == 'true' + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + first_commit=${{ inputs.first_commit }} + if [ "$first_commit" == "" ] + then + first_commit=$(git log --after="$(date -d "1 day ago" +"%Y-%m-%d") 23:00:00" --pretty=format:"%h" | tail -n 1) + fi + + if [ "$first_commit" != "" ] + then + nox -s benchmarks -- overnight $first_commit + fi + + - name: Warn of failure + if: > + failure() && + steps.overnight.outcome == 'failure' + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + title="Overnight benchmark workflow failed: 
\`${{ github.run_id }}\`" + body="Generated by GHA run [\`${{github.run_id}}\`](https://github.com/${{github.repository}}/actions/runs/${{github.run_id}})" + gh issue create --title "$title" --body "$body" --label "Bot" --label "Type: Performance" --repo $GITHUB_REPOSITORY + + - name: Upload any benchmark reports + if: success() || steps.overnight.outcome == 'failure' + uses: actions/upload-artifact@v4 + with: + name: benchmark_reports + path: .github/workflows/benchmark_reports + + - name: Archive asv results + if: ${{ always() }} + uses: actions/upload-artifact@v4 + with: + name: asv-raw-results + path: benchmarks/.asv/results diff --git a/.github/workflows/ci-citation.yml b/.github/workflows/ci-citation.yml new file mode 100644 index 0000000000..99cced758b --- /dev/null +++ b/.github/workflows/ci-citation.yml @@ -0,0 +1,30 @@ +name: ci-citation + +on: + pull_request: + paths: + - "CITATION.cff" + + push: + paths: + - "CITATION.cff" + + workflow_dispatch: + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + validate: + name: "validate" + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: "check CITATION.cff" + uses: citation-file-format/cffconvert-github-action@4cf11baa70a673bfdf9dad0acc7ee33b3f4b6084 + with: + args: "--validate" diff --git a/.github/workflows/ci-manifest.yml b/.github/workflows/ci-manifest.yml new file mode 100644 index 0000000000..18b7fb224d --- /dev/null +++ b/.github/workflows/ci-manifest.yml @@ -0,0 +1,26 @@ +# Reference +# - https://github.com/actions/checkout + +name: ci-manifest + +on: + pull_request: + branches: + - "*" + + push: + branches-ignore: + - "auto-update-lockfiles" + - "pre-commit-ci-update-config" + - "dependabot/*" + + workflow_dispatch: + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + manifest: + name: "check-manifest" + uses: scitools/workflows/.github/workflows/ci-manifest.yml@2024.10.1 diff --git a/.github/workflows/ci-tests.yml b/.github/workflows/ci-tests.yml new file mode 100644 index 0000000000..4b21e73384 --- /dev/null +++ b/.github/workflows/ci-tests.yml @@ -0,0 +1,148 @@ +# reference: +# - https://github.com/actions/cache +# - https://github.com/actions/checkout +# - https://github.com/marketplace/actions/setup-miniconda + +name: ci-tests + +on: + push: + branches: + - "main" + - "v*x" + tags: + - "v*" + pull_request: + branches: + - "*" + workflow_dispatch: + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + tests: + name: "${{ matrix.session }} (py${{ matrix.python-version }} ${{ matrix.os }})" + + runs-on: ${{ matrix.os }} + + defaults: + run: + shell: bash -l {0} + + strategy: + fail-fast: false + matrix: + os: ["ubuntu-latest"] + python-version: ["3.12"] + session: ["doctest", "gallery", "linkcheck"] + include: + - os: "ubuntu-latest" + python-version: "3.12" + session: "tests" + coverage: "--coverage" + - os: "ubuntu-latest" + python-version: "3.11" + session: "tests" + - os: "ubuntu-latest" + python-version: "3.10" + session: "tests" + + env: + # NOTE: IRIS_TEST_DATA_VERSION is also set in benchmarks_run.yml + IRIS_TEST_DATA_VERSION: "2.28" + ENV_NAME: "ci-tests" + + steps: + - name: "checkout" + uses: actions/checkout@v4 + + - name: "environment configure" + env: + # Maximum cache period (in weeks) before forcing a cache refresh. 
+ CACHE_WEEKS: 2 + run: | + echo "CACHE_PERIOD=$(date +%Y).$(expr $(date +%U) / ${CACHE_WEEKS})" >> ${GITHUB_ENV} + echo "LOCK_FILE=requirements/locks/py$(echo ${{ matrix.python-version }} | tr -d '.')-linux-64.lock" >> ${GITHUB_ENV} + + - name: "data cache" + uses: ./.github/workflows/composite/iris-data-cache + with: + cache_build: 6 + env_name: ${{ env.ENV_NAME }} + version: ${{ env.IRIS_TEST_DATA_VERSION }} + + - name: "conda package cache" + uses: ./.github/workflows/composite/conda-pkg-cache + with: + cache_build: 6 + cache_period: ${{ env.CACHE_PERIOD }} + env_name: ${{ env.ENV_NAME }} + + - name: "conda install" + uses: conda-incubator/setup-miniconda@v3 + with: + miniforge-version: latest + channels: conda-forge + activate-environment: ${{ env.ENV_NAME }} + auto-update-conda: false + + - name: "conda environment cache" + uses: ./.github/workflows/composite/conda-env-cache + with: + cache_build: 6 + cache_period: ${{ env.CACHE_PERIOD }} + env_name: ${{ env.ENV_NAME }} + install_packages: "cartopy nox pip" + + - name: "conda info" + run: | + conda info + conda list + + - name: "cartopy cache" + uses: ./.github/workflows/composite/cartopy-cache + with: + cache_build: 6 + cache_period: ${{ env.CACHE_PERIOD }} + env_name: ${{ env.ENV_NAME }} + + - name: "nox cache" + uses: ./.github/workflows/composite/nox-cache + with: + cache_build: 6 + env_name: ${{ env.ENV_NAME }} + lock_file: ${{ env.LOCK_FILE }} + + # TODO: drop use of site.cfg and explicit use of mplrc + - name: "iris configure" + env: + SITE_CFG: lib/iris/etc/site.cfg + MPL_RC: ${HOME}/.config/matplotlib/matplotlibrc + run: | + mkdir -p $(dirname ${SITE_CFG}) + echo ${SITE_CFG} + echo "[Resources]" >> ${SITE_CFG} + echo "test_data_dir = ${HOME}/iris-test-data/test_data" >> ${SITE_CFG} + echo "doc_dir = ${GITHUB_WORKSPACE}/docs" >> ${SITE_CFG} + cat ${SITE_CFG} + mkdir -p $(dirname ${MPL_RC}) + echo ${MPL_RC} + echo "backend : agg" >> ${MPL_RC} + echo "image.cmap : viridis" >> ${MPL_RC} + cat ${MPL_RC} + + - name: "iris ${{ matrix.session }}" + env: + PY_VER: ${{ matrix.python-version }} + # Force coloured output on GitHub Actions. 
+ PY_COLORS: "1" + run: | + nox --session ${{ matrix.session }} -- --verbose ${{ matrix.coverage }} + + - name: "upload coverage report" + if: ${{ matrix.coverage }} + uses: codecov/codecov-action@v4 + with: + token: ${{ secrets.CODECOV_TOKEN }} \ No newline at end of file diff --git a/.github/workflows/ci-wheels.yml b/.github/workflows/ci-wheels.yml new file mode 100644 index 0000000000..9c53673481 --- /dev/null +++ b/.github/workflows/ci-wheels.yml @@ -0,0 +1,165 @@ +# Reference: +# - https://github.com/actions/checkout +# - https://github.com/actions/download-artifact +# - https://github.com/actions/upload-artifact +# - https://github.com/pypa/build +# - https://github.com/pypa/gh-action-pypi-publish +# - https://test.pypi.org/help/#apitoken + +name: ci-wheels + +on: + pull_request: + + push: + tags: + - "v*" + branches-ignore: + - "auto-update-lockfiles" + - "pre-commit-ci-update-config" + - "dependabot/*" + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + build: + name: "build sdist & wheel" + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: "building" + shell: bash + run: | + pipx run build + + - uses: actions/upload-artifact@v4 + with: + name: pypi-artifacts + path: ${{ github.workspace }}/dist/* + + test-wheel: + needs: build + name: "test wheel (py${{ matrix.python-version }})" + runs-on: ubuntu-latest + defaults: + run: + shell: bash -l {0} + strategy: + fail-fast: false + matrix: + python-version: ["3.10", "3.11", "3.12"] + session: ["wheel"] + env: + ENV_NAME: "ci-wheels" + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - uses: actions/download-artifact@v4 + with: + name: pypi-artifacts + path: ${{ github.workspace }}/dist + + - name: "environment configure" + env: + # Maximum cache period (in weeks) before forcing a cache refresh. 
+ CACHE_WEEKS: 2 + run: | + echo "CACHE_PERIOD=$(date +%Y).$(expr $(date +%U) / ${CACHE_WEEKS})" >> ${GITHUB_ENV} + echo "LOCK_FILE=requirements/locks/py$(echo ${{ matrix.python-version }} | tr -d '.')-linux-64.lock" >> ${GITHUB_ENV} + + - name: "conda package cache" + uses: ./.github/workflows/composite/conda-pkg-cache + with: + cache_build: 0 + cache_period: ${{ env.CACHE_PERIOD }} + env_name: ${{ env.ENV_NAME }} + + - name: "conda install" + uses: conda-incubator/setup-miniconda@v3 + with: + miniforge-version: latest + channels: conda-forge,defaults + activate-environment: ${{ env.ENV_NAME }} + auto-update-conda: false + use-only-tar-bz2: true + + - name: "conda environment cache" + uses: ./.github/workflows/composite/conda-env-cache + with: + cache_build: 0 + cache_period: ${{ env.CACHE_PERIOD }} + env_name: ${{ env.ENV_NAME }} + install_packages: "nox pip" + + - name: "nox cache" + uses: ./.github/workflows/composite/nox-cache + with: + cache_build: 1 + env_name: ${{ env.ENV_NAME }} + lock_file: ${{ env.LOCK_FILE }} + + - name: "nox install and test wheel" + env: + PY_VER: ${{ matrix.python-version }} + run: | + nox --session ${{ matrix.session }} -- --verbose + + show-artifacts: + needs: build + name: "show artifacts" + runs-on: ubuntu-latest + steps: + - uses: actions/download-artifact@v4 + with: + name: pypi-artifacts + path: ${{ github.workspace }}/dist + + - shell: bash + run: | + ls -l ${{ github.workspace }}/dist + + publish-artifacts-test-pypi: + needs: test-wheel + name: "publish to test.pypi" + runs-on: ubuntu-latest + # upload to Test PyPI for every commit on main branch + # and check for the SciTools repo + if: github.event_name == 'push' && github.event.ref == 'refs/heads/main' && github.repository_owner == 'SciTools' + steps: + - uses: actions/download-artifact@v4 + with: + name: pypi-artifacts + path: ${{ github.workspace }}/dist + + - uses: pypa/gh-action-pypi-publish@release/v1 + with: + user: __token__ + password: ${{ secrets.TEST_PYPI_API_TOKEN }} + repository_url: https://test.pypi.org/legacy/ + skip_existing: true + print_hash: true + + publish-artifacts-pypi: + needs: test-wheel + name: "publish to pypi" + runs-on: ubuntu-latest + # upload to PyPI for every tag starting with 'v' + if: github.event_name == 'push' && startsWith(github.event.ref, 'refs/tags/v') && github.repository_owner == 'SciTools' + steps: + - uses: actions/download-artifact@v4 + with: + name: pypi-artifacts + path: ${{ github.workspace }}/dist + + - uses: pypa/gh-action-pypi-publish@release/v1 + with: + user: __token__ + password: ${{ secrets.PYPI_API_TOKEN }} + print_hash: true diff --git a/.github/workflows/composite/cartopy-cache/action.yml b/.github/workflows/composite/cartopy-cache/action.yml new file mode 100644 index 0000000000..d42e5c36cb --- /dev/null +++ b/.github/workflows/composite/cartopy-cache/action.yml @@ -0,0 +1,41 @@ +name: "cartopy-cache" +description: "create and cache cartopy assets" + +# +# Assumes the environment contains the following variables: +# - CONDA +# +inputs: + cache_build: + description: "conda environment cache build number" + required: false + default: "0" + cache_period: + description: "conda environment cache timestamp" + required: true + env_name: + description: "environment name" + required: true + +runs: + using: "composite" + steps: + - uses: actions/cache@v4 + id: cartopy-cache + with: + path: ~/.local/share/cartopy + key: ${{ runner.os }}-cartopy-${{ inputs.env_name }}-p${{ inputs.cache_period }}-b${{ inputs.cache_build }} + + - if: 
steps.cartopy-cache.outputs.cache-hit != 'true' + env: + CARTOPY_SHARE_DIR: ~/.local/share/cartopy + CARTOPY_FEATURE: https://raw.githubusercontent.com/SciTools/cartopy/v0.20.0/tools/cartopy_feature_download.py + shell: bash + run: | + # Require to explicitly activate the environment within the composite action. + source ${{ env.CONDA }}/etc/profile.d/conda.sh >/dev/null 2>&1 + conda activate ${{ inputs.env_name }} + wget --quiet ${CARTOPY_FEATURE} + mkdir -p ${CARTOPY_SHARE_DIR} + # Requires a pre-installed version of cartopy within the environment. + python cartopy_feature_download.py physical --output ${CARTOPY_SHARE_DIR} --no-warn diff --git a/.github/workflows/composite/conda-env-cache/action.yml b/.github/workflows/composite/conda-env-cache/action.yml new file mode 100644 index 0000000000..15eaaec63c --- /dev/null +++ b/.github/workflows/composite/conda-env-cache/action.yml @@ -0,0 +1,35 @@ +name: "conda-env-cache" +description: "create and cache the conda environment" + +# +# Assumes the environment contains the following variables: +# - CONDA +# +inputs: + cache_build: + description: "conda environment cache build number" + required: false + default: "0" + cache_period: + description: "conda environment cache timestamp" + required: true + env_name: + description: "environment name" + required: true + install_packages: + description: "conda packages to install into environment" + required: true + +runs: + using: "composite" + steps: + - uses: actions/cache@v4 + id: conda-env-cache + with: + path: ${{ env.CONDA }}/envs/${{ inputs.env_name }} + key: ${{ runner.os }}-conda-env-${{ inputs.env_name }}-p${{ inputs.cache_period }}-b${{ inputs.cache_build }} + + - if: steps.conda-env-cache.outputs.cache-hit != 'true' + shell: bash + run: | + conda install --quiet --name ${{ inputs.env_name }} ${{ inputs.install_packages }} diff --git a/.github/workflows/composite/conda-pkg-cache/action.yml b/.github/workflows/composite/conda-pkg-cache/action.yml new file mode 100644 index 0000000000..48c4470e44 --- /dev/null +++ b/.github/workflows/composite/conda-pkg-cache/action.yml @@ -0,0 +1,22 @@ +name: "conda-pkg-cache" +description: "cache the conda environment packages" + +inputs: + cache_build: + description: "conda environment cache build number" + required: false + default: "0" + cache_period: + description: "conda environment cache timestamp" + required: true + env_name: + description: "environment name" + required: true + +runs: + using: "composite" + steps: + - uses: actions/cache@v4 + with: + path: ~/conda_pkgs_dir + key: ${{ runner.os }}-conda-pkgs-${{ inputs.env_name }}-p${{ inputs.cache_period }}-b${{ inputs.cache_build }} diff --git a/.github/workflows/composite/iris-data-cache/action.yml b/.github/workflows/composite/iris-data-cache/action.yml new file mode 100644 index 0000000000..7ba7acb2cc --- /dev/null +++ b/.github/workflows/composite/iris-data-cache/action.yml @@ -0,0 +1,30 @@ +name: "iris-data-cache" +description: "create and cache the iris test data" + +inputs: + cache_build: + description: "data cache build number" + required: false + default: "0" + env_name: + description: "environment name" + required: true + version: + description: "iris test data version" + required: true + +runs: + using: "composite" + steps: + - uses: actions/cache@v4 + id: data-cache + with: + path: ~/iris-test-data + key: ${{ runner.os }}-iris-test-data-${{ inputs.env_name }}-v${{ inputs.version }}-b${{ inputs.cache_build }} + + - if: steps.data-cache.outputs.cache-hit != 'true' + shell: bash + run: | + 
wget --quiet https://github.com/SciTools/iris-test-data/archive/v${{ inputs.version }}.zip -O iris-test-data.zip + unzip -q iris-test-data.zip + mv iris-test-data-${{ inputs.version }} ~/iris-test-data diff --git a/.github/workflows/composite/nox-cache/action.yml b/.github/workflows/composite/nox-cache/action.yml new file mode 100644 index 0000000000..00387331e7 --- /dev/null +++ b/.github/workflows/composite/nox-cache/action.yml @@ -0,0 +1,22 @@ +name: "nox cache" +description: "cache the nox test environments" + +inputs: + cache_build: + description: "nox cache build number" + required: false + default: "0" + env_name: + description: "environment name" + required: true + lock_file: + description: "conda-lock environment requirements filename" + required: true + +runs: + using: "composite" + steps: + - uses: actions/cache@v4 + with: + path: ${{ github.workspace }}/.nox + key: ${{ runner.os }}-nox-${{ inputs.env_name }}-s${{ matrix.session }}-py${{ matrix.python-version }}-b${{ inputs.cache_build }}-${{ hashFiles(inputs.lock_file) }} diff --git a/.github/workflows/labeler.yml b/.github/workflows/labeler.yml new file mode 100644 index 0000000000..7914ec2531 --- /dev/null +++ b/.github/workflows/labeler.yml @@ -0,0 +1,15 @@ +# Reference +# - https://github.com/actions/labeler + +name: "Pull Request Labeler" +on: +- pull_request_target + +jobs: + labeler: + permissions: + contents: read + pull-requests: write + runs-on: ubuntu-latest + steps: + - uses: actions/labeler@v5 \ No newline at end of file diff --git a/.github/workflows/refresh-lockfiles.yml b/.github/workflows/refresh-lockfiles.yml new file mode 100644 index 0000000000..3ebb056433 --- /dev/null +++ b/.github/workflows/refresh-lockfiles.yml @@ -0,0 +1,18 @@ +# Updates the environment lock files. See the called workflow in the +# scitools/workflows repo for more details. + +name: Refresh Lockfiles + + +on: + workflow_dispatch: + schedule: + # Run once a week on a Saturday night + # N.B. "should" be quoted, according to + # https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#onschedule + - cron: "1 0 * * 6" + +jobs: + refresh_lockfiles: + uses: scitools/workflows/.github/workflows/refresh-lockfiles.yml@2024.10.1 + secrets: inherit diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml new file mode 100644 index 0000000000..3df5aa3a18 --- /dev/null +++ b/.github/workflows/stale.yml @@ -0,0 +1,83 @@ +# See https://github.com/actions/stale + +name: Stale issues and pull-requests + +on: + schedule: + # Run once a day + # N.B. "should" be quoted, according to + # https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#onschedule + - cron: "0 0 * * *" + +jobs: + stale: + if: "github.repository == 'SciTools/iris'" + runs-on: ubuntu-latest + steps: + - uses: actions/stale@v9 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} + + # Idle number of days before marking issues/prs stale. + days-before-stale: 500 + + # Idle number of days before closing stale issues/prs. + days-before-close: 28 + + # Comment on the staled issues. + stale-issue-message: | + In order to maintain a backlog of relevant issues, we automatically label them as stale after 500 days of inactivity. + + If this issue is still important to you, then please comment on this issue and the stale label will be removed. + + Otherwise this issue will be automatically closed in 28 days time. + + # Comment on the staled prs. 
+ stale-pr-message: | + In order to maintain a backlog of relevant PRs, we automatically label them as stale after 500 days of inactivity. + + If this PR is still important to you, then please comment on this PR and the stale label will be removed. + + Otherwise this PR will be automatically closed in 28 days time. + + # Comment on the staled issues while closed. + close-issue-message: | + This stale issue has been automatically closed due to a lack of community activity. + + If you still care about this issue, then please either: + * Re-open this issue, if you have sufficient permissions, or + * Add a comment stating that this is still relevant and someone will re-open it on your behalf. + + # Comment on the staled prs while closed. + close-pr-message: | + This stale PR has been automatically closed due to a lack of community activity. + + If you still care about this PR, then please either: + * Re-open this PR, if you have sufficient permissions, or + * Add a comment pinging `@SciTools/iris-devs` who will re-open on your behalf. + + # Label to apply on staled issues. + stale-issue-label: Stale + + # Label to apply on staled prs. + stale-pr-label: Stale + + # Labels on issues exempted from stale. + exempt-issue-labels: + "Status: Blocked,Status: Decision Required,Peloton 🚴‍♂️,Good First Issue, Dragon 🐉, Dragon Sub-Task 🦎, Release: Major" + + # Labels on prs exempted from stale. + exempt-pr-labels: + "Status: Blocked,Status: Decision Required,Peloton 🚴‍♂️,Good First Issue, Dragon 🐉, Dragon Sub-Task 🦎, Release: Major" + + # Max number of operations per run. + operations-per-run: 300 + + # Remove stale label from issues/prs on updates/comments. + remove-stale-when-updated: true + + # Order to get issues/PRs. + ascending: true + + # Exempt all issues/prs with milestones from stale. + exempt-all-milestones: true diff --git a/.gitignore b/.gitignore index f0420cbc22..1b132cbd38 100644 --- a/.gitignore +++ b/.gitignore @@ -1,5 +1,8 @@ *.py[co] +# setuptools-scm +_version.py + # Environment file which should be autogenerated *conda_requirements.txt* @@ -15,6 +18,7 @@ var sdist develop-eggs .installed.cfg +.nox # Installer logs pip-log.txt @@ -25,13 +29,17 @@ pip-cache .tox .pytest_cache +# asv data, environments, results +.asv +benchmarks/.data +.github/workflows/benchmark_reports + #Translations *.mo # Created by Iris build *.so lib/iris/etc/site.cfg -lib/iris/fileformats/_pyke_rules/compiled_krb/ lib/iris/std_names.py # Iris test result files @@ -51,18 +59,23 @@ lib/iris/tests/results/imagerepo.lock /.idea *.cover +# vscode files +.vscode + # Auto generated documentation files -docs/iris/src/_static/random_image.js -docs/iris/src/_templates/gallery.html -docs/iris/src/examples/ -docs/iris/src/iris/ -docs/iris/src/matplotlibrc +docs/src/_build/* +docs/src/generated +docs/src/sg_execution_times.rst # Example test results -docs/iris/iris_image_test_output/ +docs/iris_image_test_output/ # Created by editiors *~ \#* \.\#* *.swp +.ipynb_checkpoints + +# Files generated during test runs. 
+lib/iris/tests/results/**/*.dot
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
new file mode 100644
index 0000000000..053e4f839a
--- /dev/null
+++ b/.pre-commit-config.yaml
@@ -0,0 +1,78 @@
+# See https://pre-commit.com for more information
+# See https://pre-commit.com/hooks.html for more hooks
+
+files: |
+    (?x)(
+        noxfile\.py|
+        setup\.py|
+        docs\/.+\.py|
+        lib\/.+\.py|
+        benchmarks\/.+\.py
+    )
+minimum_pre_commit_version: 1.21.0
+
+repos:
+- repo: https://github.com/pre-commit/pre-commit-hooks
+  rev: v5.0.0
+  hooks:
+    # Prevent giant files from being committed.
+    - id: check-added-large-files
+    # Check whether files parse as valid Python.
+    - id: check-ast
+    # Check for file name conflicts on case-insensitive filesystems.
+    - id: check-case-conflict
+    # Check for files that contain merge conflict strings.
+    - id: check-merge-conflict
+    # Check for debugger imports and py37+ `breakpoint()` calls in Python source.
+    - id: debug-statements
+    # Don't commit to main branch.
+    - id: no-commit-to-branch
+
+- repo: https://github.com/astral-sh/ruff-pre-commit
+  rev: "v0.7.0"
+  hooks:
+    - id: ruff
+      types: [file, python]
+      args: [--fix, --show-fixes]
+    - id: ruff-format
+      types: [file, python]
+
+- repo: https://github.com/codespell-project/codespell
+  rev: "v2.3.0"
+  hooks:
+    - id: codespell
+      types_or: [asciidoc, python, markdown, rst]
+      additional_dependencies: [tomli]
+
+- repo: https://github.com/PyCQA/flake8
+  rev: 7.1.1
+  hooks:
+    - id: flake8
+      types: [file, python]
+
+- repo: https://github.com/asottile/blacken-docs
+  rev: 1.19.0
+  hooks:
+    - id: blacken-docs
+      types: [file, rst]
+
+- repo: https://github.com/aio-libs/sort-all
+  rev: v1.2.0
+  hooks:
+    - id: sort-all
+      types: [file, python]
+
+- repo: https://github.com/pre-commit/mirrors-mypy
+  rev: 'v1.12.1'
+  hooks:
+    - id: mypy
+      additional_dependencies:
+        - 'types-requests'
+      exclude: 'noxfile\.py|docs/src/conf\.py'
+
+- repo: https://github.com/numpy/numpydoc
+  rev: v1.8.0
+  hooks:
+    - id: numpydoc-validation
+      exclude: "^lib/iris/tests/|docs/gallery_code/"
+      types: [file, python]
diff --git a/.readthedocs.yml b/.readthedocs.yml
new file mode 100644
index 0000000000..d82bd513ca
--- /dev/null
+++ b/.readthedocs.yml
@@ -0,0 +1,31 @@
+version: 2
+
+build:
+  os: "ubuntu-22.04"
+  tools:
+    python: "mambaforge-22.9"
+  jobs:
+    post_checkout:
+      # The SciTools/iris repository is shallow i.e., has a .git/shallow,
+      # therefore complete the repository with a full history in order
+      # to allow setuptools-scm to correctly auto-discover the version.
+      - git fetch --unshallow
+      - git fetch --all
+    # Need to stash the local changes that Read the Docs makes so that
+    # setuptools_scm can generate the correct Iris version.
+    pre_install:
+      - git stash
+    post_install:
+      - git stash pop
+
+conda:
+  environment: requirements/readthedocs.yml
+
+sphinx:
+  configuration: docs/src/conf.py
+  fail_on_warning: false
+
+python:
+  install:
+    - method: pip
+      path: .
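As context for the `.readthedocs.yml` steps above: the `git fetch --unshallow` and `git stash` commands exist because the package version is now derived from git metadata by setuptools-scm (hence the generated `_version.py` entry added to `.gitignore`). A minimal sketch of that version discovery, illustrative only and not part of this changeset, assuming setuptools-scm is installed and the call is made from the root of a full (non-shallow) clone; the example version string is hypothetical:

```python
# Illustrative sketch -- not part of this changeset.
# setuptools-scm inspects the git history for the nearest release tag and
# derives a PEP 440 version from it, e.g. a development version such as
# "3.9.0.dev12+g1a2b3c4" for a commit twelve revisions past the last tag.
from setuptools_scm import get_version

# On a shallow clone the nearest tag may be unreachable, which is why the
# Read the Docs build runs `git fetch --unshallow` first; uncommitted edits
# are stashed so they cannot mark the derived version as "dirty".
version = get_version(root=".")  # path to the repository root
print(version)
```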
diff --git a/.ruff.toml b/.ruff.toml
new file mode 100644
index 0000000000..5d78ecdb57
--- /dev/null
+++ b/.ruff.toml
@@ -0,0 +1,176 @@
+extend = "pyproject.toml"
+
+lint.ignore = [
+    # NOTE: To find a rule code to fix, run:
+    # ruff --select="ALL" --statistics lib/iris/
+
+    # Pyflakes (F)
+    # https://docs.astral.sh/ruff/rules/#pyflakes-f
+    "F",
+
+    # pycodestyle (E, W)
+    # https://docs.astral.sh/ruff/rules/#pycodestyle-e-w
+    "E",
+
+    # mccabe (C90)
+    # https://docs.astral.sh/ruff/rules/#mccabe-c90
+    "C90",
+
+    # pep8-naming (N)
+    # https://docs.astral.sh/ruff/rules/#pep8-naming-n
+    "N",
+
+    # pydocstyle (D)
+    # https://docs.astral.sh/ruff/rules/#pydocstyle-d
+    # (D-1) Permanent
+    "D105",  # Missing docstring in magic method
+    # (D-2) Temporary, to be removed when we are more compliant. Rare cases move to (1).
+    "D101",  # Missing docstring in public class
+    "D102",  # Missing docstring in public method
+    # (D-3) Temporary, before an initial review, either fix occurrences or move to (2).
+    "D103",  # Missing docstring in public function
+
+    # pyupgrade (UP)
+    # https://docs.astral.sh/ruff/rules/#pyupgrade-up
+    "UP",
+
+    # flake8-annotations (ANN)
+    # https://docs.astral.sh/ruff/rules/#flake8-annotations-ann
+    "ANN",
+
+    # flake8-bandit (S)
+    # https://docs.astral.sh/ruff/rules/#flake8-bandit-s
+    "S",
+
+    # flake8-blind-except (BLE)
+    # https://docs.astral.sh/ruff/rules/#flake8-blind-except-ble
+    "BLE",
+
+    # flake8-boolean-trap (FBT)
+    # https://docs.astral.sh/ruff/rules/#flake8-boolean-trap-fbt
+    "FBT",
+
+    # flake8-bugbear (B)
+    # https://docs.astral.sh/ruff/rules/#flake8-bugbear-b
+    "B",
+
+    # flake8-builtins (A)
+    # https://docs.astral.sh/ruff/rules/#flake8-builtins-a
+    "A",
+
+    # flake8-comprehensions (C4)
+    # https://docs.astral.sh/ruff/rules/#flake8-comprehensions-c4
+    "C4",
+
+    # flake8-datetimez (DTZ)
+    # https://docs.astral.sh/ruff/rules/#flake8-datetimez-dtz
+    "DTZ",
+
+    # flake8-errmsg (EM)
+    # https://docs.astral.sh/ruff/rules/#flake8-errmsg-em
+    "EM",
+
+    # flake8-future-annotations (FA)
+    # https://docs.astral.sh/ruff/rules/#flake8-future-annotations-fa
+    "FA",
+
+    # flake8-logging-format (G)
+    # https://docs.astral.sh/ruff/rules/#flake8-logging-format-g
+    "G",
+
+    # flake8-no-pep420 (INP)
+    # https://docs.astral.sh/ruff/rules/#flake8-no-pep420-inp
+    "INP",
+
+    # flake8-pie (PIE)
+    # https://docs.astral.sh/ruff/rules/#flake8-pie-pie
+    "PIE",
+
+    # flake8-print (T20)
+    # https://docs.astral.sh/ruff/rules/#flake8-print-t20
+    "T20",
+
+    # flake8-pyi (PYI)
+    # https://docs.astral.sh/ruff/rules/#flake8-pyi-pyi
+    "PYI",
+
+    # flake8-pytest-style (PT)
+    # https://docs.astral.sh/ruff/rules/#flake8-pytest-style-pt
+    "PT",
+
+    # flake8-raise (RSE)
+    # https://docs.astral.sh/ruff/rules/#flake8-raise-rse
+    "RSE",
+
+    # flake8-return (RET)
+    # https://docs.astral.sh/ruff/rules/#flake8-return-ret
+    "RET",
+
+    # flake8-self (SLF)
+    # https://docs.astral.sh/ruff/rules/#flake8-self-slf
+    "SLF",
+
+    # flake8-slots (SLOT)
+    # https://docs.astral.sh/ruff/rules/#flake8-slots-slot
+    "SLOT",
+
+    # flake8-simplify (SIM)
+    # https://docs.astral.sh/ruff/rules/#flake8-simplify-sim
+    "SIM",
+
+    # flake8-tidy-imports (TID)
+    # https://docs.astral.sh/ruff/rules/#flake8-tidy-imports-tid
+    "TID",
+
+    # flake8-type-checking (TCH)
+    # https://docs.astral.sh/ruff/rules/#flake8-type-checking-tch
+    "TCH",
+
+    # flake8-unused-arguments (ARG)
+    # https://docs.astral.sh/ruff/rules/#flake8-unused-arguments-arg
+    "ARG",
+
+    # flake8-use-pathlib (PTH)
+    # https://docs.astral.sh/ruff/rules/#flake8-use-pathlib-pth
+    "PTH",
+ + # flake8-todos (TD) + # https://docs.astral.sh/ruff/rules/#flake8-todos-td + "TD", + + # flake8-fixme (FIX) + # https://docs.astral.sh/ruff/rules/#flake8-fixme-fix + "FIX", + + # eradicate (ERA) + # https://docs.astral.sh/ruff/rules/#eradicate-era + "ERA", + + # pandas-vet (PD) + # https://docs.astral.sh/ruff/rules/#pandas-vet-pd + "PD", + + # pygrep-hooks (PGH) + # https://docs.astral.sh/ruff/rules/#pygrep-hooks-pgh + "PGH", + + # Pylint (PL) + # https://docs.astral.sh/ruff/rules/#pylint-pl + "PL", + + # tryceratops (TRY) + # https://docs.astral.sh/ruff/rules/#tryceratops-try + "TRY", + + # flynt (FLY) + # https://docs.astral.sh/ruff/rules/#flynt-fly + "FLY", + + # Perflint (PERF) + # https://docs.astral.sh/ruff/rules/#perflint-perf + "PERF", + + # Ruff-specific rules (RUF) + # https://docs.astral.sh/ruff/rules/#ruff-specific-rules-ruf + "RUF", +] diff --git a/.stickler.yml b/.stickler.yml deleted file mode 100644 index 31d097914e..0000000000 --- a/.stickler.yml +++ /dev/null @@ -1,6 +0,0 @@ -linters: - flake8: - -files: - ignore: - - 'lib/iris/fileformats/um_cf_map.py' diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index 32e596e1c1..0000000000 --- a/.travis.yml +++ /dev/null @@ -1,168 +0,0 @@ -# Please update the test data git references below if appropriate. -# -# Note: Contrary to the travis documentation, -# http://about.travis-ci.org/docs/user/languages/python/#Travis-CI-Uses-Isolated-virtualenvs -# we will use conda to give us a much faster setup time. - - -language: minimal -dist: xenial - -env: - global: - # The decryption key for the encrypted .github/deploy_key.scitools-docs.enc. - - secure: "N9/qBUT5CqfC7KQBDy5mIWZcGNuUJk3e/qmKJpotWYV+zwOI4GghJsRce6nFnlRiwl65l5oBEcvf3+sBvUfbZqh7U0MdHpw2tHhr2FSCmMB3bkvARZblh9M37f4da9G9VmRkqnyBM5G5TImXtoq4dusvNWKvLW0qETciaipq7ws=" - matrix: - - PYTHON_VERSION=3.6 TEST_TARGET=default TEST_MINIMAL=true - - PYTHON_VERSION=3.6 TEST_TARGET=default - - PYTHON_VERSION=3.6 TEST_TARGET=example - - - PYTHON_VERSION=3.7 TEST_TARGET=default TEST_MINIMAL=true - - PYTHON_VERSION=3.7 TEST_TARGET=default - - PYTHON_VERSION=3.7 TEST_TARGET=example - - PYTHON_VERSION=3.7 TEST_TARGET=doctest PUSH_BUILT_DOCS=true - -git: - # We need a deep clone so that we can compute the age of the files using their git history. 
- depth: 10000 - -install: - - > - export IRIS_TEST_DATA_REF="1696ac3a823a06b95f430670f285ee97671d2cf2"; - export IRIS_TEST_DATA_SUFFIX=$(echo "${IRIS_TEST_DATA_REF}" | sed "s/^v//"); - - # Install miniconda - # ----------------- - - > - echo 'Installing miniconda'; - export CONDA_BASE="https://repo.continuum.io/miniconda/Miniconda"; - wget --quiet ${CONDA_BASE}3-latest-Linux-x86_64.sh -O miniconda.sh; - bash miniconda.sh -b -p ${HOME}/miniconda; - export PATH="${HOME}/miniconda/bin:${PATH}"; - - # Create the basic testing environment - # ------------------------------------ - # Explicitly add defaults channel, see https://github.com/conda/conda/issues/2675 - - > - echo 'Configure conda and create an environment'; - conda config --set always_yes yes --set changeps1 no; - conda config --set show_channel_urls True; - conda config --add channels conda-forge; - conda update --quiet conda; - ENV_NAME='test-environment'; - conda create --quiet -n ${ENV_NAME} python=${PYTHON_VERSION} pip; - source activate ${ENV_NAME}; - - # Customise the testing environment - # --------------------------------- - - > - echo 'Install Iris dependencies'; - CONDA_REQS_GROUPS="test"; - if [[ "${TEST_MINIMAL}" != true ]]; then - CONDA_REQS_GROUPS="${CONDA_REQS_GROUPS} all"; - fi; - if [[ "${TEST_TARGET}" == 'doctest' ]]; then - CONDA_REQS_GROUPS="${CONDA_REQS_GROUPS} docs"; - fi; - CONDA_REQS_FILE="conda-requirements.txt"; - python requirements/gen_conda_requirements.py --groups ${CONDA_REQS_GROUPS} > ${CONDA_REQS_FILE}; - cat ${CONDA_REQS_FILE}; - conda install --quiet -n ${ENV_NAME} --file ${CONDA_REQS_FILE}; - - - PREFIX="${HOME}/miniconda/envs/${ENV_NAME}" - - # Output debug info - - > - conda list -n ${ENV_NAME}; - conda list -n ${ENV_NAME} --explicit; - conda info -a; - -# Pre-load Natural Earth data to avoid multiple, overlapping downloads. -# i.e. There should be no DownloadWarning reports in the log. - - python -c 'import cartopy; cartopy.io.shapereader.natural_earth()' - -# iris test data - - > - if [[ "${TEST_MINIMAL}" != true ]]; then - wget --quiet -O iris-test-data.zip https://github.com/SciTools/iris-test-data/archive/${IRIS_TEST_DATA_REF}.zip; - unzip -q iris-test-data.zip; - mv "iris-test-data-${IRIS_TEST_DATA_SUFFIX}" iris-test-data; - fi - -# set config paths - - > - SITE_CFG="lib/iris/etc/site.cfg"; - echo "[Resources]" > ${SITE_CFG}; - echo "test_data_dir = $(pwd)/iris-test-data/test_data" >> ${SITE_CFG}; - echo "doc_dir = $(pwd)/docs/iris" >> ${SITE_CFG}; - echo "[System]" >> ${SITE_CFG}; - echo "udunits2_path = ${PREFIX}/lib/libudunits2.so" >> ${SITE_CFG}; - - - python setup.py --quiet install - - # TODO : remove when iris doesn't do an integration test requiring iris-grib. -# TODO: uncomment and address the 5 failures and 10 errors in iris-grib. -# - if [[ "${TEST_MINIMAL}" != true ]]; then -# conda install --quiet -n ${ENV_NAME} python-eccodes; -# conda install --quiet -n ${ENV_NAME} --no-deps iris-grib; -# fi - -script: - # Capture install-dir: As a test command must be last for get Travis to check - # the RC, so it's best to start each operation with an absolute cd. 
- - INSTALL_DIR=$(pwd) - - - > - if [[ ${TEST_TARGET} == 'default' ]]; then - export IRIS_REPO_DIR=${INSTALL_DIR}; - python -m iris.tests.runner --default-tests --system-tests --print-failed-images; - fi - - - if [[ ${TEST_TARGET} == 'example' ]]; then - python -m iris.tests.runner --example-tests --print-failed-images; - fi - - # A call to check "whatsnew" contributions are valid, because the Iris test - # for it needs a *developer* install to be able to find the docs. - - if [[ ${TEST_TARGET} == 'doctest' ]]; then - cd ${INSTALL_DIR}/docs/iris/src/whatsnew; - python aggregate_directory.py --checkonly; - fi - - # When pushing built docs, attempt to make a preliminary whatsnew by calling - # 'aggregate_directory.py', before the build. - - > - if [[ ${PUSH_BUILT_DOCS} == 'true' ]]; then - cd ${INSTALL_DIR}/docs/iris/src/whatsnew; - WHATSNEW=$(ls -d contributions_* 2>/dev/null); - if [[ "$WHATSNEW" != "" ]]; then - python aggregate_directory.py --unreleased; - fi; - fi - - # Build the docs. - - > - if [[ ${TEST_TARGET} == 'doctest' ]]; then - MPL_RC_DIR="${HOME}/.config/matplotlib"; - mkdir -p ${MPL_RC_DIR}; - echo 'backend : agg' > ${MPL_RC_DIR}/matplotlibrc; - echo 'image.cmap : viridis' >> ${MPL_RC_DIR}/matplotlibrc; - cd ${INSTALL_DIR}/docs/iris; - make clean html && make doctest; - fi - - # Split the organisation out of the slug. See https://stackoverflow.com/a/5257398/741316 for description. - - ORG=(${TRAVIS_REPO_SLUG//\// }) - - # When we merge a change to SciTools/iris, we can push docs to github pages. - # At present, only the Python 3.7 "doctest" job does this. - # Results appear at https://scitools-docs.github.io/iris/<>/index.html - - if [[ ${ORG} == "SciTools" && ${TRAVIS_EVENT_TYPE} == 'push' && ${PUSH_BUILT_DOCS} == 'true' ]]; then - cd ${INSTALL_DIR}; - pip install doctr; - doctr deploy --deploy-repo SciTools-docs/iris --built-docs docs/iris/build/html - --key-path .github/deploy_key.scitools-docs.enc - --no-require-master - ${TRAVIS_BRANCH:-${TRAVIS_TAG}}; - fi - diff --git a/CHANGES b/CHANGES index 2364de84a4..b3916a97b6 100644 --- a/CHANGES +++ b/CHANGES @@ -1,5 +1,5 @@ This file is no longer updated and is provided for historical purposes only. -Please see docs/iris/src/whatsnew/ for a changelog. +Please see docs/src/whatsnew/ for a changelog. Release 1.4 (14 June 2013) @@ -373,7 +373,7 @@ Features added Incompatible changes -------------------- -* The Iris data model is now fully aligned with the `CF data model `_ . +* The Iris data model is now fully aligned with the `CF data model `_ . Iris remains file-format independent, as is the underlying CF data model. * Cube merging has been re-written for the new CF data model with the benefit that some corner cases are now better handled. Some users may find that their cubes, once merged, now have a smaller total shape and more intelligent handling of dimension coordinate picking. @@ -433,7 +433,7 @@ Features added given cubes (see :func:`iris.iterate.izip`). * Cell methods will now appear in the printout of a cube. * Supporting software dependency versions have been updated. Of particular note is matplotlib which has gone from version 1.0.1 - up to `1.1.0 `_ . This may have a small impact on + up to `1.1.0 `_ . This may have a small impact on some plot element placements. 
Incompatible changes diff --git a/CITATION.cff b/CITATION.cff new file mode 100644 index 0000000000..c3fcdd26d5 --- /dev/null +++ b/CITATION.cff @@ -0,0 +1,23 @@ +cff-version: 1.2.0 +message: "If Iris played an important part in your research then please add us to your reference list by using the references below." +title: "Iris" +keywords: + - "cf-metadata" + - "data-analysis" + - "earth-science" + - "grib" + - "netcdf" + - "meteorology" + - "oceanography" + - "space-weather" + - "ugrid" + - "visualisation" +authors: + - name: "Iris contributors" +abstract: "A powerful, format-agnostic, and community-driven Python package for analysing and visualising Earth science data" +license: "BSD-3-Clause" +license-url: "https://spdx.org/licenses/BSD-3-Clause.html" +doi: "10.5281/zenodo.595182" +url: "http://scitools.org.uk/" +repository-code: "https://github.com/SciTools/iris" +type: "software" diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md new file mode 100644 index 0000000000..bb040d21c5 --- /dev/null +++ b/CODE_OF_CONDUCT.md @@ -0,0 +1,133 @@ + +# Contributor Covenant Code of Conduct + +## Our Pledge + +We as members, contributors, and leaders pledge to make participation in our +community a harassment-free experience for everyone, regardless of age, body +size, visible or invisible disability, ethnicity, sex characteristics, gender +identity and expression, level of experience, education, socio-economic status, +nationality, personal appearance, race, caste, color, religion, or sexual +identity and orientation. + +We pledge to act and interact in ways that contribute to an open, welcoming, +diverse, inclusive, and healthy community. + +## Our Standards + +Examples of behavior that contributes to a positive environment for our +community include: + +* Demonstrating empathy and kindness toward other people +* Being respectful of differing opinions, viewpoints, and experiences +* Giving and gracefully accepting constructive feedback +* Accepting responsibility and apologizing to those affected by our mistakes, + and learning from the experience +* Focusing on what is best not just for us as individuals, but for the overall + community + +Examples of unacceptable behavior include: + +* The use of sexualized language or imagery, and sexual attention or advances of + any kind +* Trolling, insulting or derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or email address, + without their explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Enforcement Responsibilities + +Community leaders are responsible for clarifying and enforcing our standards of +acceptable behavior and will take appropriate and fair corrective action in +response to any behavior that they deem inappropriate, threatening, offensive, +or harmful. + +Community leaders have the right and responsibility to remove, edit, or reject +comments, commits, code, wiki edits, issues, and other contributions that are +not aligned to this Code of Conduct, and will communicate reasons for moderation +decisions when appropriate. + +## Scope + +This Code of Conduct applies within all community spaces, and also applies when +an individual is officially representing the community in public spaces. 
+Examples of representing our community include using an official e-mail address, +posting via an official social media account, or acting as an appointed +representative at an online or offline event. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported to the community leaders responsible for enforcement at +scitools.pub@gmail.com. +All complaints will be reviewed and investigated promptly and fairly. + +All community leaders are obligated to respect the privacy and security of the +reporter of any incident. + +## Enforcement Guidelines + +Community leaders will follow these Community Impact Guidelines in determining +the consequences for any action they deem in violation of this Code of Conduct: + +### 1. Correction + +**Community Impact**: Use of inappropriate language or other behavior deemed +unprofessional or unwelcome in the community. + +**Consequence**: A private, written warning from community leaders, providing +clarity around the nature of the violation and an explanation of why the +behavior was inappropriate. A public apology may be requested. + +### 2. Warning + +**Community Impact**: A violation through a single incident or series of +actions. + +**Consequence**: A warning with consequences for continued behavior. No +interaction with the people involved, including unsolicited interaction with +those enforcing the Code of Conduct, for a specified period of time. This +includes avoiding interactions in community spaces as well as external channels +like social media. Violating these terms may lead to a temporary or permanent +ban. + +### 3. Temporary Ban + +**Community Impact**: A serious violation of community standards, including +sustained inappropriate behavior. + +**Consequence**: A temporary ban from any sort of interaction or public +communication with the community for a specified period of time. No public or +private interaction with the people involved, including unsolicited interaction +with those enforcing the Code of Conduct, is allowed during this period. +Violating these terms may lead to a permanent ban. + +### 4. Permanent Ban + +**Community Impact**: Demonstrating a pattern of violation of community +standards, including sustained inappropriate behavior, harassment of an +individual, or aggression toward or disparagement of classes of individuals. + +**Consequence**: A permanent ban from any sort of public interaction within the +community. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], +version 2.1, available at +[https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1]. + +Community Impact Guidelines were inspired by +[Mozilla's code of conduct enforcement ladder][Mozilla CoC]. + +For answers to common questions about this code of conduct, see the FAQ at +[https://www.contributor-covenant.org/faq][FAQ]. Translations are available at +[https://www.contributor-covenant.org/translations][translations]. + +[homepage]: https://www.contributor-covenant.org +[v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html +[Mozilla CoC]: https://github.com/mozilla/diversity +[FAQ]: https://www.contributor-covenant.org/faq +[translations]: https://www.contributor-covenant.org/translations diff --git a/COPYING b/COPYING deleted file mode 100644 index 94a9ed024d..0000000000 --- a/COPYING +++ /dev/null @@ -1,674 +0,0 @@ - GNU GENERAL PUBLIC LICENSE - Version 3, 29 June 2007 - - Copyright (C) 2007 Free Software Foundation, Inc. 
- Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - Preamble - - The GNU General Public License is a free, copyleft license for -software and other kinds of works. - - The licenses for most software and other practical works are designed -to take away your freedom to share and change the works. By contrast, -the GNU General Public License is intended to guarantee your freedom to -share and change all versions of a program--to make sure it remains free -software for all its users. We, the Free Software Foundation, use the -GNU General Public License for most of our software; it applies also to -any other work released this way by its authors. You can apply it to -your programs, too. - - When we speak of free software, we are referring to freedom, not -price. Our General Public Licenses are designed to make sure that you -have the freedom to distribute copies of free software (and charge for -them if you wish), that you receive source code or can get it if you -want it, that you can change the software or use pieces of it in new -free programs, and that you know you can do these things. - - To protect your rights, we need to prevent others from denying you -these rights or asking you to surrender the rights. Therefore, you have -certain responsibilities if you distribute copies of the software, or if -you modify it: responsibilities to respect the freedom of others. - - For example, if you distribute copies of such a program, whether -gratis or for a fee, you must pass on to the recipients the same -freedoms that you received. You must make sure that they, too, receive -or can get the source code. And you must show them these terms so they -know their rights. - - Developers that use the GNU GPL protect your rights with two steps: -(1) assert copyright on the software, and (2) offer you this License -giving you legal permission to copy, distribute and/or modify it. - - For the developers' and authors' protection, the GPL clearly explains -that there is no warranty for this free software. For both users' and -authors' sake, the GPL requires that modified versions be marked as -changed, so that their problems will not be attributed erroneously to -authors of previous versions. - - Some devices are designed to deny users access to install or run -modified versions of the software inside them, although the manufacturer -can do so. This is fundamentally incompatible with the aim of -protecting users' freedom to change the software. The systematic -pattern of such abuse occurs in the area of products for individuals to -use, which is precisely where it is most unacceptable. Therefore, we -have designed this version of the GPL to prohibit the practice for those -products. If such problems arise substantially in other domains, we -stand ready to extend this provision to those domains in future versions -of the GPL, as needed to protect the freedom of users. - - Finally, every program is threatened constantly by software patents. -States should not allow patents to restrict development and use of -software on general-purpose computers, but in those that do, we wish to -avoid the special danger that patents applied to a free program could -make it effectively proprietary. To prevent this, the GPL assures that -patents cannot be used to render the program non-free. - - The precise terms and conditions for copying, distribution and -modification follow. - - TERMS AND CONDITIONS - - 0. Definitions. 
- - "This License" refers to version 3 of the GNU General Public License. - - "Copyright" also means copyright-like laws that apply to other kinds of -works, such as semiconductor masks. - - "The Program" refers to any copyrightable work licensed under this -License. Each licensee is addressed as "you". "Licensees" and -"recipients" may be individuals or organizations. - - To "modify" a work means to copy from or adapt all or part of the work -in a fashion requiring copyright permission, other than the making of an -exact copy. The resulting work is called a "modified version" of the -earlier work or a work "based on" the earlier work. - - A "covered work" means either the unmodified Program or a work based -on the Program. - - To "propagate" a work means to do anything with it that, without -permission, would make you directly or secondarily liable for -infringement under applicable copyright law, except executing it on a -computer or modifying a private copy. Propagation includes copying, -distribution (with or without modification), making available to the -public, and in some countries other activities as well. - - To "convey" a work means any kind of propagation that enables other -parties to make or receive copies. Mere interaction with a user through -a computer network, with no transfer of a copy, is not conveying. - - An interactive user interface displays "Appropriate Legal Notices" -to the extent that it includes a convenient and prominently visible -feature that (1) displays an appropriate copyright notice, and (2) -tells the user that there is no warranty for the work (except to the -extent that warranties are provided), that licensees may convey the -work under this License, and how to view a copy of this License. If -the interface presents a list of user commands or options, such as a -menu, a prominent item in the list meets this criterion. - - 1. Source Code. - - The "source code" for a work means the preferred form of the work -for making modifications to it. "Object code" means any non-source -form of a work. - - A "Standard Interface" means an interface that either is an official -standard defined by a recognized standards body, or, in the case of -interfaces specified for a particular programming language, one that -is widely used among developers working in that language. - - The "System Libraries" of an executable work include anything, other -than the work as a whole, that (a) is included in the normal form of -packaging a Major Component, but which is not part of that Major -Component, and (b) serves only to enable use of the work with that -Major Component, or to implement a Standard Interface for which an -implementation is available to the public in source code form. A -"Major Component", in this context, means a major essential component -(kernel, window system, and so on) of the specific operating system -(if any) on which the executable work runs, or a compiler used to -produce the work, or an object code interpreter used to run it. - - The "Corresponding Source" for a work in object code form means all -the source code needed to generate, install, and (for an executable -work) run the object code and to modify the work, including scripts to -control those activities. However, it does not include the work's -System Libraries, or general-purpose tools or generally available free -programs which are used unmodified in performing those activities but -which are not part of the work. 
For example, Corresponding Source -includes interface definition files associated with source files for -the work, and the source code for shared libraries and dynamically -linked subprograms that the work is specifically designed to require, -such as by intimate data communication or control flow between those -subprograms and other parts of the work. - - The Corresponding Source need not include anything that users -can regenerate automatically from other parts of the Corresponding -Source. - - The Corresponding Source for a work in source code form is that -same work. - - 2. Basic Permissions. - - All rights granted under this License are granted for the term of -copyright on the Program, and are irrevocable provided the stated -conditions are met. This License explicitly affirms your unlimited -permission to run the unmodified Program. The output from running a -covered work is covered by this License only if the output, given its -content, constitutes a covered work. This License acknowledges your -rights of fair use or other equivalent, as provided by copyright law. - - You may make, run and propagate covered works that you do not -convey, without conditions so long as your license otherwise remains -in force. You may convey covered works to others for the sole purpose -of having them make modifications exclusively for you, or provide you -with facilities for running those works, provided that you comply with -the terms of this License in conveying all material for which you do -not control copyright. Those thus making or running the covered works -for you must do so exclusively on your behalf, under your direction -and control, on terms that prohibit them from making any copies of -your copyrighted material outside their relationship with you. - - Conveying under any other circumstances is permitted solely under -the conditions stated below. Sublicensing is not allowed; section 10 -makes it unnecessary. - - 3. Protecting Users' Legal Rights From Anti-Circumvention Law. - - No covered work shall be deemed part of an effective technological -measure under any applicable law fulfilling obligations under article -11 of the WIPO copyright treaty adopted on 20 December 1996, or -similar laws prohibiting or restricting circumvention of such -measures. - - When you convey a covered work, you waive any legal power to forbid -circumvention of technological measures to the extent such circumvention -is effected by exercising rights under this License with respect to -the covered work, and you disclaim any intention to limit operation or -modification of the work as a means of enforcing, against the work's -users, your or third parties' legal rights to forbid circumvention of -technological measures. - - 4. Conveying Verbatim Copies. - - You may convey verbatim copies of the Program's source code as you -receive it, in any medium, provided that you conspicuously and -appropriately publish on each copy an appropriate copyright notice; -keep intact all notices stating that this License and any -non-permissive terms added in accord with section 7 apply to the code; -keep intact all notices of the absence of any warranty; and give all -recipients a copy of this License along with the Program. - - You may charge any price or no price for each copy that you convey, -and you may offer support or warranty protection for a fee. - - 5. Conveying Modified Source Versions. 
- - You may convey a work based on the Program, or the modifications to -produce it from the Program, in the form of source code under the -terms of section 4, provided that you also meet all of these conditions: - - a) The work must carry prominent notices stating that you modified - it, and giving a relevant date. - - b) The work must carry prominent notices stating that it is - released under this License and any conditions added under section - 7. This requirement modifies the requirement in section 4 to - "keep intact all notices". - - c) You must license the entire work, as a whole, under this - License to anyone who comes into possession of a copy. This - License will therefore apply, along with any applicable section 7 - additional terms, to the whole of the work, and all its parts, - regardless of how they are packaged. This License gives no - permission to license the work in any other way, but it does not - invalidate such permission if you have separately received it. - - d) If the work has interactive user interfaces, each must display - Appropriate Legal Notices; however, if the Program has interactive - interfaces that do not display Appropriate Legal Notices, your - work need not make them do so. - - A compilation of a covered work with other separate and independent -works, which are not by their nature extensions of the covered work, -and which are not combined with it such as to form a larger program, -in or on a volume of a storage or distribution medium, is called an -"aggregate" if the compilation and its resulting copyright are not -used to limit the access or legal rights of the compilation's users -beyond what the individual works permit. Inclusion of a covered work -in an aggregate does not cause this License to apply to the other -parts of the aggregate. - - 6. Conveying Non-Source Forms. - - You may convey a covered work in object code form under the terms -of sections 4 and 5, provided that you also convey the -machine-readable Corresponding Source under the terms of this License, -in one of these ways: - - a) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by the - Corresponding Source fixed on a durable physical medium - customarily used for software interchange. - - b) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by a - written offer, valid for at least three years and valid for as - long as you offer spare parts or customer support for that product - model, to give anyone who possesses the object code either (1) a - copy of the Corresponding Source for all the software in the - product that is covered by this License, on a durable physical - medium customarily used for software interchange, for a price no - more than your reasonable cost of physically performing this - conveying of source, or (2) access to copy the - Corresponding Source from a network server at no charge. - - c) Convey individual copies of the object code with a copy of the - written offer to provide the Corresponding Source. This - alternative is allowed only occasionally and noncommercially, and - only if you received the object code with such an offer, in accord - with subsection 6b. - - d) Convey the object code by offering access from a designated - place (gratis or for a charge), and offer equivalent access to the - Corresponding Source in the same way through the same place at no - further charge. 
You need not require recipients to copy the - Corresponding Source along with the object code. If the place to - copy the object code is a network server, the Corresponding Source - may be on a different server (operated by you or a third party) - that supports equivalent copying facilities, provided you maintain - clear directions next to the object code saying where to find the - Corresponding Source. Regardless of what server hosts the - Corresponding Source, you remain obligated to ensure that it is - available for as long as needed to satisfy these requirements. - - e) Convey the object code using peer-to-peer transmission, provided - you inform other peers where the object code and Corresponding - Source of the work are being offered to the general public at no - charge under subsection 6d. - - A separable portion of the object code, whose source code is excluded -from the Corresponding Source as a System Library, need not be -included in conveying the object code work. - - A "User Product" is either (1) a "consumer product", which means any -tangible personal property which is normally used for personal, family, -or household purposes, or (2) anything designed or sold for incorporation -into a dwelling. In determining whether a product is a consumer product, -doubtful cases shall be resolved in favor of coverage. For a particular -product received by a particular user, "normally used" refers to a -typical or common use of that class of product, regardless of the status -of the particular user or of the way in which the particular user -actually uses, or expects or is expected to use, the product. A product -is a consumer product regardless of whether the product has substantial -commercial, industrial or non-consumer uses, unless such uses represent -the only significant mode of use of the product. - - "Installation Information" for a User Product means any methods, -procedures, authorization keys, or other information required to install -and execute modified versions of a covered work in that User Product from -a modified version of its Corresponding Source. The information must -suffice to ensure that the continued functioning of the modified object -code is in no case prevented or interfered with solely because -modification has been made. - - If you convey an object code work under this section in, or with, or -specifically for use in, a User Product, and the conveying occurs as -part of a transaction in which the right of possession and use of the -User Product is transferred to the recipient in perpetuity or for a -fixed term (regardless of how the transaction is characterized), the -Corresponding Source conveyed under this section must be accompanied -by the Installation Information. But this requirement does not apply -if neither you nor any third party retains the ability to install -modified object code on the User Product (for example, the work has -been installed in ROM). - - The requirement to provide Installation Information does not include a -requirement to continue to provide support service, warranty, or updates -for a work that has been modified or installed by the recipient, or for -the User Product in which it has been modified or installed. Access to a -network may be denied when the modification itself materially and -adversely affects the operation of the network or violates the rules and -protocols for communication across the network. 
- - Corresponding Source conveyed, and Installation Information provided, -in accord with this section must be in a format that is publicly -documented (and with an implementation available to the public in -source code form), and must require no special password or key for -unpacking, reading or copying. - - 7. Additional Terms. - - "Additional permissions" are terms that supplement the terms of this -License by making exceptions from one or more of its conditions. -Additional permissions that are applicable to the entire Program shall -be treated as though they were included in this License, to the extent -that they are valid under applicable law. If additional permissions -apply only to part of the Program, that part may be used separately -under those permissions, but the entire Program remains governed by -this License without regard to the additional permissions. - - When you convey a copy of a covered work, you may at your option -remove any additional permissions from that copy, or from any part of -it. (Additional permissions may be written to require their own -removal in certain cases when you modify the work.) You may place -additional permissions on material, added by you to a covered work, -for which you have or can give appropriate copyright permission. - - Notwithstanding any other provision of this License, for material you -add to a covered work, you may (if authorized by the copyright holders of -that material) supplement the terms of this License with terms: - - a) Disclaiming warranty or limiting liability differently from the - terms of sections 15 and 16 of this License; or - - b) Requiring preservation of specified reasonable legal notices or - author attributions in that material or in the Appropriate Legal - Notices displayed by works containing it; or - - c) Prohibiting misrepresentation of the origin of that material, or - requiring that modified versions of such material be marked in - reasonable ways as different from the original version; or - - d) Limiting the use for publicity purposes of names of licensors or - authors of the material; or - - e) Declining to grant rights under trademark law for use of some - trade names, trademarks, or service marks; or - - f) Requiring indemnification of licensors and authors of that - material by anyone who conveys the material (or modified versions of - it) with contractual assumptions of liability to the recipient, for - any liability that these contractual assumptions directly impose on - those licensors and authors. - - All other non-permissive additional terms are considered "further -restrictions" within the meaning of section 10. If the Program as you -received it, or any part of it, contains a notice stating that it is -governed by this License along with a term that is a further -restriction, you may remove that term. If a license document contains -a further restriction but permits relicensing or conveying under this -License, you may add to a covered work material governed by the terms -of that license document, provided that the further restriction does -not survive such relicensing or conveying. - - If you add terms to a covered work in accord with this section, you -must place, in the relevant source files, a statement of the -additional terms that apply to those files, or a notice indicating -where to find the applicable terms. - - Additional terms, permissive or non-permissive, may be stated in the -form of a separately written license, or stated as exceptions; -the above requirements apply either way. 
- - 8. Termination. - - You may not propagate or modify a covered work except as expressly -provided under this License. Any attempt otherwise to propagate or -modify it is void, and will automatically terminate your rights under -this License (including any patent licenses granted under the third -paragraph of section 11). - - However, if you cease all violation of this License, then your -license from a particular copyright holder is reinstated (a) -provisionally, unless and until the copyright holder explicitly and -finally terminates your license, and (b) permanently, if the copyright -holder fails to notify you of the violation by some reasonable means -prior to 60 days after the cessation. - - Moreover, your license from a particular copyright holder is -reinstated permanently if the copyright holder notifies you of the -violation by some reasonable means, this is the first time you have -received notice of violation of this License (for any work) from that -copyright holder, and you cure the violation prior to 30 days after -your receipt of the notice. - - Termination of your rights under this section does not terminate the -licenses of parties who have received copies or rights from you under -this License. If your rights have been terminated and not permanently -reinstated, you do not qualify to receive new licenses for the same -material under section 10. - - 9. Acceptance Not Required for Having Copies. - - You are not required to accept this License in order to receive or -run a copy of the Program. Ancillary propagation of a covered work -occurring solely as a consequence of using peer-to-peer transmission -to receive a copy likewise does not require acceptance. However, -nothing other than this License grants you permission to propagate or -modify any covered work. These actions infringe copyright if you do -not accept this License. Therefore, by modifying or propagating a -covered work, you indicate your acceptance of this License to do so. - - 10. Automatic Licensing of Downstream Recipients. - - Each time you convey a covered work, the recipient automatically -receives a license from the original licensors, to run, modify and -propagate that work, subject to this License. You are not responsible -for enforcing compliance by third parties with this License. - - An "entity transaction" is a transaction transferring control of an -organization, or substantially all assets of one, or subdividing an -organization, or merging organizations. If propagation of a covered -work results from an entity transaction, each party to that -transaction who receives a copy of the work also receives whatever -licenses to the work the party's predecessor in interest had or could -give under the previous paragraph, plus a right to possession of the -Corresponding Source of the work from the predecessor in interest, if -the predecessor has it or can get it with reasonable efforts. - - You may not impose any further restrictions on the exercise of the -rights granted or affirmed under this License. For example, you may -not impose a license fee, royalty, or other charge for exercise of -rights granted under this License, and you may not initiate litigation -(including a cross-claim or counterclaim in a lawsuit) alleging that -any patent claim is infringed by making, using, selling, offering for -sale, or importing the Program or any portion of it. - - 11. Patents. 
- - A "contributor" is a copyright holder who authorizes use under this -License of the Program or a work on which the Program is based. The -work thus licensed is called the contributor's "contributor version". - - A contributor's "essential patent claims" are all patent claims -owned or controlled by the contributor, whether already acquired or -hereafter acquired, that would be infringed by some manner, permitted -by this License, of making, using, or selling its contributor version, -but do not include claims that would be infringed only as a -consequence of further modification of the contributor version. For -purposes of this definition, "control" includes the right to grant -patent sublicenses in a manner consistent with the requirements of -this License. - - Each contributor grants you a non-exclusive, worldwide, royalty-free -patent license under the contributor's essential patent claims, to -make, use, sell, offer for sale, import and otherwise run, modify and -propagate the contents of its contributor version. - - In the following three paragraphs, a "patent license" is any express -agreement or commitment, however denominated, not to enforce a patent -(such as an express permission to practice a patent or covenant not to -sue for patent infringement). To "grant" such a patent license to a -party means to make such an agreement or commitment not to enforce a -patent against the party. - - If you convey a covered work, knowingly relying on a patent license, -and the Corresponding Source of the work is not available for anyone -to copy, free of charge and under the terms of this License, through a -publicly available network server or other readily accessible means, -then you must either (1) cause the Corresponding Source to be so -available, or (2) arrange to deprive yourself of the benefit of the -patent license for this particular work, or (3) arrange, in a manner -consistent with the requirements of this License, to extend the patent -license to downstream recipients. "Knowingly relying" means you have -actual knowledge that, but for the patent license, your conveying the -covered work in a country, or your recipient's use of the covered work -in a country, would infringe one or more identifiable patents in that -country that you have reason to believe are valid. - - If, pursuant to or in connection with a single transaction or -arrangement, you convey, or propagate by procuring conveyance of, a -covered work, and grant a patent license to some of the parties -receiving the covered work authorizing them to use, propagate, modify -or convey a specific copy of the covered work, then the patent license -you grant is automatically extended to all recipients of the covered -work and works based on it. - - A patent license is "discriminatory" if it does not include within -the scope of its coverage, prohibits the exercise of, or is -conditioned on the non-exercise of one or more of the rights that are -specifically granted under this License. 
You may not convey a covered -work if you are a party to an arrangement with a third party that is -in the business of distributing software, under which you make payment -to the third party based on the extent of your activity of conveying -the work, and under which the third party grants, to any of the -parties who would receive the covered work from you, a discriminatory -patent license (a) in connection with copies of the covered work -conveyed by you (or copies made from those copies), or (b) primarily -for and in connection with specific products or compilations that -contain the covered work, unless you entered into that arrangement, -or that patent license was granted, prior to 28 March 2007. - - Nothing in this License shall be construed as excluding or limiting -any implied license or other defenses to infringement that may -otherwise be available to you under applicable patent law. - - 12. No Surrender of Others' Freedom. - - If conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot convey a -covered work so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you may -not convey it at all. For example, if you agree to terms that obligate you -to collect a royalty for further conveying from those to whom you convey -the Program, the only way you could satisfy both those terms and this -License would be to refrain entirely from conveying the Program. - - 13. Use with the GNU Affero General Public License. - - Notwithstanding any other provision of this License, you have -permission to link or combine any covered work with a work licensed -under version 3 of the GNU Affero General Public License into a single -combined work, and to convey the resulting work. The terms of this -License will continue to apply to the part which is the covered work, -but the special requirements of the GNU Affero General Public License, -section 13, concerning interaction through a network will apply to the -combination as such. - - 14. Revised Versions of this License. - - The Free Software Foundation may publish revised and/or new versions of -the GNU General Public License from time to time. Such new versions will -be similar in spirit to the present version, but may differ in detail to -address new problems or concerns. - - Each version is given a distinguishing version number. If the -Program specifies that a certain numbered version of the GNU General -Public License "or any later version" applies to it, you have the -option of following the terms and conditions either of that numbered -version or of any later version published by the Free Software -Foundation. If the Program does not specify a version number of the -GNU General Public License, you may choose any version ever published -by the Free Software Foundation. - - If the Program specifies that a proxy can decide which future -versions of the GNU General Public License can be used, that proxy's -public statement of acceptance of a version permanently authorizes you -to choose that version for the Program. - - Later license versions may give you additional or different -permissions. However, no additional obligations are imposed on any -author or copyright holder as a result of your choosing to follow a -later version. - - 15. Disclaimer of Warranty. 
- - THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY -APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT -HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY -OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, -THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM -IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF -ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - - 16. Limitation of Liability. - - IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING -WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS -THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY -GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE -USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF -DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD -PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), -EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF -SUCH DAMAGES. - - 17. Interpretation of Sections 15 and 16. - - If the disclaimer of warranty and limitation of liability provided -above cannot be given local legal effect according to their terms, -reviewing courts shall apply local law that most closely approximates -an absolute waiver of all civil liability in connection with the -Program, unless a warranty or assumption of liability accompanies a -copy of the Program in return for a fee. - - END OF TERMS AND CONDITIONS - - How to Apply These Terms to Your New Programs - - If you develop a new program, and you want it to be of the greatest -possible use to the public, the best way to achieve this is to make it -free software which everyone can redistribute and change under these terms. - - To do so, attach the following notices to the program. It is safest -to attach them to the start of each source file to most effectively -state the exclusion of warranty; and each file should have at least -the "copyright" line and a pointer to where the full notice is found. - - - Copyright (C) - - This program is free software: you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program. If not, see . - -Also add information on how to contact you by electronic and paper mail. - - If the program does terminal interaction, make it output a short -notice like this when it starts in an interactive mode: - - Copyright (C) - This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. - This is free software, and you are welcome to redistribute it - under certain conditions; type `show c' for details. - -The hypothetical commands `show w' and `show c' should show the appropriate -parts of the General Public License. Of course, your program's commands -might be different; for a GUI interface, you would use an "about box". 
- - You should also get your employer (if you work as a programmer) or school, -if any, to sign a "copyright disclaimer" for the program, if necessary. -For more information on this, and how to apply and follow the GNU GPL, see -. - - The GNU General Public License does not permit incorporating your program -into proprietary programs. If your program is a subroutine library, you -may consider it more useful to permit linking proprietary applications with -the library. If this is what you want to do, use the GNU Lesser General -Public License instead of this License. But first, please read -. diff --git a/COPYING.LESSER b/COPYING.LESSER deleted file mode 100644 index 65c5ca88a6..0000000000 --- a/COPYING.LESSER +++ /dev/null @@ -1,165 +0,0 @@ - GNU LESSER GENERAL PUBLIC LICENSE - Version 3, 29 June 2007 - - Copyright (C) 2007 Free Software Foundation, Inc. - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - - This version of the GNU Lesser General Public License incorporates -the terms and conditions of version 3 of the GNU General Public -License, supplemented by the additional permissions listed below. - - 0. Additional Definitions. - - As used herein, "this License" refers to version 3 of the GNU Lesser -General Public License, and the "GNU GPL" refers to version 3 of the GNU -General Public License. - - "The Library" refers to a covered work governed by this License, -other than an Application or a Combined Work as defined below. - - An "Application" is any work that makes use of an interface provided -by the Library, but which is not otherwise based on the Library. -Defining a subclass of a class defined by the Library is deemed a mode -of using an interface provided by the Library. - - A "Combined Work" is a work produced by combining or linking an -Application with the Library. The particular version of the Library -with which the Combined Work was made is also called the "Linked -Version". - - The "Minimal Corresponding Source" for a Combined Work means the -Corresponding Source for the Combined Work, excluding any source code -for portions of the Combined Work that, considered in isolation, are -based on the Application, and not on the Linked Version. - - The "Corresponding Application Code" for a Combined Work means the -object code and/or source code for the Application, including any data -and utility programs needed for reproducing the Combined Work from the -Application, but excluding the System Libraries of the Combined Work. - - 1. Exception to Section 3 of the GNU GPL. - - You may convey a covered work under sections 3 and 4 of this License -without being bound by section 3 of the GNU GPL. - - 2. Conveying Modified Versions. - - If you modify a copy of the Library, and, in your modifications, a -facility refers to a function or data to be supplied by an Application -that uses the facility (other than as an argument passed when the -facility is invoked), then you may convey a copy of the modified -version: - - a) under this License, provided that you make a good faith effort to - ensure that, in the event an Application does not supply the - function or data, the facility still operates, and performs - whatever part of its purpose remains meaningful, or - - b) under the GNU GPL, with none of the additional permissions of - this License applicable to that copy. - - 3. Object Code Incorporating Material from Library Header Files. 
- - The object code form of an Application may incorporate material from -a header file that is part of the Library. You may convey such object -code under terms of your choice, provided that, if the incorporated -material is not limited to numerical parameters, data structure -layouts and accessors, or small macros, inline functions and templates -(ten or fewer lines in length), you do both of the following: - - a) Give prominent notice with each copy of the object code that the - Library is used in it and that the Library and its use are - covered by this License. - - b) Accompany the object code with a copy of the GNU GPL and this license - document. - - 4. Combined Works. - - You may convey a Combined Work under terms of your choice that, -taken together, effectively do not restrict modification of the -portions of the Library contained in the Combined Work and reverse -engineering for debugging such modifications, if you also do each of -the following: - - a) Give prominent notice with each copy of the Combined Work that - the Library is used in it and that the Library and its use are - covered by this License. - - b) Accompany the Combined Work with a copy of the GNU GPL and this license - document. - - c) For a Combined Work that displays copyright notices during - execution, include the copyright notice for the Library among - these notices, as well as a reference directing the user to the - copies of the GNU GPL and this license document. - - d) Do one of the following: - - 0) Convey the Minimal Corresponding Source under the terms of this - License, and the Corresponding Application Code in a form - suitable for, and under terms that permit, the user to - recombine or relink the Application with a modified version of - the Linked Version to produce a modified Combined Work, in the - manner specified by section 6 of the GNU GPL for conveying - Corresponding Source. - - 1) Use a suitable shared library mechanism for linking with the - Library. A suitable mechanism is one that (a) uses at run time - a copy of the Library already present on the user's computer - system, and (b) will operate properly with a modified version - of the Library that is interface-compatible with the Linked - Version. - - e) Provide Installation Information, but only if you would otherwise - be required to provide such information under section 6 of the - GNU GPL, and only to the extent that such information is - necessary to install and execute a modified version of the - Combined Work produced by recombining or relinking the - Application with a modified version of the Linked Version. (If - you use option 4d0, the Installation Information must accompany - the Minimal Corresponding Source and Corresponding Application - Code. If you use option 4d1, you must provide the Installation - Information in the manner specified by section 6 of the GNU GPL - for conveying Corresponding Source.) - - 5. Combined Libraries. - - You may place library facilities that are a work based on the -Library side by side in a single library together with other library -facilities that are not Applications and are not covered by this -License, and convey such a combined library under terms of your -choice, if you do both of the following: - - a) Accompany the combined library with a copy of the same work based - on the Library, uncombined with any other library facilities, - conveyed under the terms of this License. 
-
-   b) Give prominent notice with the combined library that part of it
-   is a work based on the Library, and explaining where to find the
-   accompanying uncombined form of the same work.
-
-  6. Revised Versions of the GNU Lesser General Public License.
-
-  The Free Software Foundation may publish revised and/or new versions
-of the GNU Lesser General Public License from time to time. Such new
-versions will be similar in spirit to the present version, but may
-differ in detail to address new problems or concerns.
-
-  Each version is given a distinguishing version number. If the
-Library as you received it specifies that a certain numbered version
-of the GNU Lesser General Public License "or any later version"
-applies to it, you have the option of following the terms and
-conditions either of that published version or of any later version
-published by the Free Software Foundation. If the Library as you
-received it does not specify a version number of the GNU Lesser
-General Public License, you may choose any version of the GNU Lesser
-General Public License ever published by the Free Software Foundation.
-
-  If the Library as you received it specifies that a proxy can decide
-whether future versions of the GNU Lesser General Public License shall
-apply, that proxy's public statement of acceptance of any version is
-permanent authorization for you to choose that version for the
-Library.
diff --git a/INSTALL b/INSTALL
deleted file mode 100644
index 9296f97a29..0000000000
--- a/INSTALL
+++ /dev/null
@@ -1,95 +0,0 @@
-You can either install Iris using the conda package manager or from source.
-
-Installing using conda
-----------------------
-
-Iris is available using conda for the following platforms:
- * Linux 64-bit,
- * Mac OSX 64-bit, and
- * Windows 32-bit and 64-bit.
-
-To install Iris using conda, you must first download and install conda,
-for example from http://conda.pydata.org/miniconda.html.
-
-Once conda is installed, you can install Iris using conda on any platform with
-the following command::
-
-    conda install -c conda-forge iris
-
-If you wish to run any of the code examples
-(see http://scitools.org.uk/iris/docs/latest/examples/index.html) you will also
-need the Iris sample data. This can also be installed using conda::
-
-    conda install -c conda-forge iris-sample-data
-
-Further documentation on using conda and the features it provides can be found
-at http://conda.pydata.org/docs/intro.html.
-
-
-Installing from source
-----------------------
-
-The latest Iris source release is available from
-https://github.com/SciTools/iris.
-
-Iris makes use of a range of other libraries and Python modules. These
-dependencies must be in place before you can successfully install
-Iris. Once you have satisfied the requirements detailed in the
-``requirements`` directory, go to the root of the Iris source tree and run::
-
-    pip install .
-
-
-In-place build - an alternative for developers
-==============================================
-We are very keen to encourage contributions to Iris. For this type of
-development activity an in-place build can be useful. Once you've cloned
-the Iris git repository you can perform an in-place build with::
-
-    pip install -e .
-
-
-Generating conda requirements
-'''''''''''''''''''''''''''''
-
-Requirements for Iris are stored in the ``requirements`` directory in the root of the source repository.
-It is possible to generate a requirements file suitable for use with conda::
-
-    python requirements/gen_conda_requirements.py > conda_requirements.txt
-
-This may then be installed with::
-
-    conda create -n my_iris_env --file conda_requirements.txt
-
-Alternatively, a full requirements file that includes all optional dependencies can be produced with::
-
-    python requirements/gen_conda_requirements.py --groups all > conda_requirements.txt
-
-
-Running the tests
-'''''''''''''''''
-
-In order to run the tests, you will need to use the ``test`` and ``docs`` groups (we include the ``docs`` group so that you can run the pull request tests locally).
-Hence the commands change to::
-
-    python requirements/gen_conda_requirements.py --groups test docs > conda_requirements.txt
-    conda create -n my_iris_env -c conda-forge --file conda_requirements.txt
-    conda activate my_iris_env  # or whatever other name you gave it
-    pip install -e .
-
-The tests can then be run with::
-
-    python setup.py test
-
-
-Custom site configuration
-=========================
-The default site configuration values can be overridden by creating the file
-``iris/etc/site.cfg``. For example, the following snippet can be used to
-specify a non-standard location for your dot executable::
-
-    [System]
-    dot_path = /usr/bin/dot
-
-An example configuration file is available in ``iris/etc/site.cfg.template``.
-See :py:func:`iris.config` for further configuration options.
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000000..2d1d23e16c
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,29 @@
+BSD 3-Clause License
+
+Copyright (c) 2010, Met Office.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+
+3. Neither the name of the copyright holder nor the names of its
+   contributors may be used to endorse or promote products derived from
+   this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
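For reference, the ``site.cfg`` that the Travis install step writes by hand (and that the INSTALL notes describe) can equally be generated with Python's standard ``configparser``. A minimal sketch, using illustrative placeholder paths rather than real project defaults:

```python
# Minimal sketch: write lib/iris/etc/site.cfg with the [Resources] and
# [System] keys shown above. All paths here are illustrative placeholders.
import configparser
from pathlib import Path

cfg = configparser.ConfigParser()
cfg["Resources"] = {
    "test_data_dir": str(Path.cwd() / "iris-test-data" / "test_data"),
    "doc_dir": str(Path.cwd() / "docs" / "iris"),
}
cfg["System"] = {
    "udunits2_path": "/path/to/env/lib/libudunits2.so",  # placeholder path
    "dot_path": "/usr/bin/dot",
}

site_cfg = Path("lib") / "iris" / "etc" / "site.cfg"
site_cfg.parent.mkdir(parents=True, exist_ok=True)
with site_cfg.open("w") as fh:
    cfg.write(fh)
```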
diff --git a/MANIFEST.in b/MANIFEST.in index efd0534863..e594303d8f 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,21 +1,33 @@ -# Top-level files -include CHANGES COPYING COPYING.LESSER INSTALL +prune .github +prune benchmarks +prune docs +prune etc +recursive-include lib *.cdl *.cml *.json *.md *.py *.template *.txt *.xml +prune requirements +recursive-include requirements *.txt +prune tools +exclude .flake8 +exclude .git-blame-ignore-revs +exclude .git_archival.txt +exclude .gitattributes +exclude .gitignore +exclude .mailmap +exclude .pre-commit-config.yaml +exclude .readthedocs.yml +exclude .ruff.toml +exclude CHANGES +exclude CODE_OF_CONDUCT.md +exclude codecov.yml +include COPYING +include COPYING.LESSER +include CITATION.cff +include LICENSE +exclude Makefile +exclude noxfile.py -# Files from setup.py package_data that are not automatically added to source distributions -recursive-include lib/iris/tests/results *.cml *.cdl *.txt *.xml *.json -recursive-include lib/iris/etc * -include lib/iris/fileformats/_pyke_rules/*.k?b -include lib/iris/tests/stock*.npz - -include requirements/*.txt - -# File required to build docs -recursive-include docs Makefile *.js *.png *.py *.rst -prune docs/iris/build - -# Files required to build std_names module -include tools/generate_std_names.py +# files required to build iris.std_names module include etc/cf-standard-name-table.xml +include tools/generate_std_names.py -global-exclude *.pyc +global-exclude *.py[cod] global-exclude __pycache__ diff --git a/Makefile b/Makefile new file mode 100755 index 0000000000..0bb56edbf9 --- /dev/null +++ b/Makefile @@ -0,0 +1,2 @@ +lockfiles: + python tools/update_lockfiles.py -o requirements/locks requirements/py*.yml \ No newline at end of file diff --git a/README.md b/README.md index fb8660f2ad..7eec86c6da 100644 --- a/README.md +++ b/README.md @@ -1,120 +1,49 @@

- - Iris
+ + Iris

+

-  Iris is a powerful, format-agnostic, community-driven Python library for
+  Iris is a powerful, format-agnostic, community-driven Python package for
   analysing and visualising Earth science data

+| | |
+|------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| ⚙️ CI | [![ci-manifest](https://github.com/SciTools/iris/actions/workflows/ci-manifest.yml/badge.svg)](https://github.com/SciTools/iris/actions/workflows/ci-manifest.yml) [![ci-tests](https://github.com/SciTools/iris/actions/workflows/ci-tests.yml/badge.svg)](https://github.com/SciTools/iris/actions/workflows/ci-tests.yml) [![ci-wheels](https://github.com/SciTools/iris/actions/workflows/ci-wheels.yml/badge.svg)](https://github.com/SciTools/iris/actions/workflows/ci-wheels.yml) [![pre-commit](https://results.pre-commit.ci/badge/github/SciTools/iris/main.svg)](https://results.pre-commit.ci/latest/github/SciTools/iris/main) |
+| 💬 Community | [![Contributor Covenant](https://img.shields.io/badge/contributor%20covenant-2.1-4baaaa.svg)](https://www.contributor-covenant.org/version/2/1/code_of_conduct/) [![GH Discussions](https://img.shields.io/badge/github-discussions%20%F0%9F%92%AC-yellow?logo=github&logoColor=lightgrey)](https://github.com/SciTools/iris/discussions) [![twitter](https://img.shields.io/twitter/follow/scitools_iris?color=yellow&label=twitter%7Cscitools_iris&logo=twitter&style=plastic)](https://twitter.com/scitools_iris) |
+| 📖 Documentation | [![rtd](https://readthedocs.org/projects/scitools-iris/badge/?version=latest)](https://scitools-iris.readthedocs.io/en/latest/?badge=latest) |
+| 📈 Health | [![codecov](https://codecov.io/gh/SciTools/iris/branch/main/graph/badge.svg?token=0GeICSIF3g)](https://codecov.io/gh/SciTools/iris) |
+| ✨ Meta | [![Ruff](https://img.shields.io/endpoint?url=https://raw.githubusercontent.com/astral-sh/ruff/main/assets/badge/v2.json)](https://github.com/astral-sh/ruff) [![NEP29](https://raster.shields.io/badge/follows-NEP29-orange.png)](https://numpy.org/neps/nep-0029-deprecation_policy.html) [![license - bsd-3-clause](https://img.shields.io/github/license/SciTools/iris)](https://github.com/SciTools/iris/blob/main/LICENSE) [![conda platform](https://img.shields.io/conda/pn/conda-forge/iris.svg)](https://anaconda.org/conda-forge/iris) |
+| 📦 Package | [![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.595182.svg)](https://doi.org/10.5281/zenodo.595182) [![conda-forge](https://img.shields.io/conda/vn/conda-forge/iris?color=orange&label=conda-forge&logo=conda-forge&logoColor=white)](https://anaconda.org/conda-forge/iris) [![pypi](https://img.shields.io/pypi/v/scitools-iris?color=orange&label=pypi&logo=python&logoColor=white)](https://pypi.org/project/scitools-iris/) [![pypi - python version](https://img.shields.io/pypi/pyversions/scitools-iris.svg?color=orange&logo=python&label=python&logoColor=white)](https://pypi.org/project/scitools-iris/) |
+| 🧰 Repo | [![commits-since](https://img.shields.io/github/commits-since/SciTools/iris/latest.svg)](https://github.com/SciTools/iris/commits/main) [![contributors](https://img.shields.io/github/contributors/SciTools/iris)](https://github.com/SciTools/iris/graphs/contributors) [![release](https://img.shields.io/github/v/release/scitools/iris)](https://github.com/SciTools/iris/releases) |
+| |
+

-[removed badge markup: Travis-CI, conda-forge downloads, # contributors,
-Latest version, Stable docs, Commits since last release, Latest docs, zenodo]
+For documentation see the
+latest
+developer version or the most recent released
+stable version.


-# Table of contents

- -[](TOC) - -+ [Overview](#overview) -+ [Documentation](#documentation) -+ [Installation](#installation) -+ [Copyright and licence](#copyright-and-licence) -+ [Get in touch](#get-in-touch) -+ [Contributing](#contributing) - -[](TOC) - -# Overview - -Iris implements a data model based on the [CF conventions](http://cfconventions.org/) -giving you a powerful, format-agnostic interface for working with your data. -It excels when working with multi-dimensional Earth Science data, where tabular -representations become unwieldy and inefficient. - -[CF Standard names](http://cfconventions.org/standard-names.html), -[units](https://github.com/SciTools/cf_units), and coordinate metadata -are built into Iris, giving you a rich and expressive interface for maintaining -an accurate representation of your data. Its treatment of data and - associated metadata as first-class objects includes: - - * a visualisation interface based on [matplotlib](https://matplotlib.org/) and - [cartopy](https://scitools.org.uk/cartopy/docs/latest/), - * unit conversion, - * subsetting and extraction, - * merge and concatenate, - * aggregations and reductions (including min, max, mean and weighted averages), - * interpolation and regridding (including nearest-neighbor, linear and area-weighted), and - * operator overloads (``+``, ``-``, ``*``, ``/``, etc.) - -A number of file formats are recognised by Iris, including CF-compliant NetCDF, GRIB, -and PP, and it has a plugin architecture to allow other formats to be added seamlessly. - -Building upon [NumPy](http://www.numpy.org/) and [dask](https://dask.pydata.org/en/latest/), -Iris scales from efficient single-machine workflows right through to multi-core clusters and HPC. -Interoperability with packages from the wider scientific Python ecosystem comes from Iris' -use of standard NumPy/dask arrays as its underlying data storage. +## [#ShowYourStripes](https://showyourstripes.info/s/globe) -# Documentation - - Stable docs The documentation for *stable released versions* of Iris, including a user guide, example code, and gallery. - - Latest docs The documentation for the *latest development version* of Iris. - - -# Installation - -The easiest way to install Iris is with [conda](https://conda.io/miniconda.html): - - conda install -c conda-forge iris - -Detailed instructions, including information on installing from source, -are available in [INSTALL](INSTALL). - -# Get in touch - - * Report bugs, or suggest new features using an Issue or Pull Request on [Github](https://github.com/SciTools/iris). You can also comment on existing Issues and Pull Requests. - * For discussions from a user perspective you could join our [SciTools Users Google Group](https://groups.google.com/forum/#!forum/scitools-iris). - * For those involved in developing Iris we also have an [Iris Developers Google Group](https://groups.google.com/forum/#!forum/scitools-iris-dev). - * [StackOverflow](https://stackoverflow.com/questions/tagged/python-iris) For "How do I?". +

+#showyourstripes Global 1850-2021

-# Copyright and licence +**Graphics and Lead Scientist**: [Ed Hawkins](https://www.met.reading.ac.uk/~ed/home/index.php), National Centre for Atmospheric Science, University of Reading. -Iris may be freely distributed, modified and used commercially under the terms -of its [GNU LGPLv3 license](COPYING.LESSER). +**Data**: Berkeley Earth, NOAA, UK Met Office, MeteoSwiss, DWD, SMHI, UoR, Meteo France & ZAMG. -# Contributing -Information on how to contribute can be found in the [Iris developer guide](https://scitools.org.uk/iris/docs/latest/developers_guide/index.html). +

+#ShowYourStripes is distributed under a
+Creative Commons Attribution 4.0 International License
+
+creative-commons-by

-(C) British Crown Copyright 2010 - 2019, Met Office diff --git a/benchmarks/README.md b/benchmarks/README.md new file mode 100644 index 0000000000..911d5f7833 --- /dev/null +++ b/benchmarks/README.md @@ -0,0 +1,163 @@ +# Iris Performance Benchmarking + +Iris uses an [Airspeed Velocity](https://github.com/airspeed-velocity/asv) +(ASV) setup to benchmark performance. This is primarily designed to check for +performance shifts between commits using statistical analysis, but can also +be easily repurposed for manual comparative and scalability analyses. + +The benchmarks are automatically run overnight +[by a GitHub Action](../.github/workflows/benchmark.yml), with any notable +shifts in performance being flagged in a new GitHub issue. + +## Running benchmarks + +On GitHub: a Pull Request can be benchmarked by adding the +https://github.com/SciTools/iris/labels/benchmark_this +label to the PR (to run a second time: just remove and re-add the label). +Note that a benchmark run could take an hour or more to complete. +This runs a comparison between the PR branch's ``HEAD`` and its merge-base with +the PR's base branch, thus showing performance differences introduced +by the PR. (This run is managed by +[the aforementioned GitHub Action](../.github/workflows/benchmark.yml)). + +To run locally: the **benchmark runner** provides conveniences for +common benchmark setup and run tasks, including replicating the automated +overnight run locally. This is accessed via the Nox `benchmarks` session - see +`nox -s benchmarks -- --help` for detail (_see also: +[bm_runner.py](./bm_runner.py)_). Alternatively you can directly run `asv ...` +commands from this directory (you will still need Nox installed - see +[Benchmark environments](#benchmark-environments)). + +A significant portion of benchmark run time is environment management. Run-time +can be reduced by placing the benchmark environment on the same file system as +your +[Conda package cache](https://conda.io/projects/conda/en/latest/user-guide/configuration/use-condarc.html#specify-pkg-directories), +if it is not already. You can achieve this by either: + +- Temporarily reconfiguring `ENV_PARENT` in `delegated_env_commands` + in [asv.conf.json](asv.conf.json) to reference a location on the same file + system as the Conda package cache. +- Using an alternative Conda package cache location during the benchmark run, + e.g. via the `$CONDA_PKGS_DIRS` environment variable. +- Moving your Iris repo to the same file system as the Conda package cache. + +### Environment variables + +* `OVERRIDE_TEST_DATA_REPOSITORY` - required - some benchmarks use +`iris-test-data` content, and your local `site.cfg` is not available for +benchmark scripts. The benchmark runner defers to any value already set in +the shell, but will otherwise download `iris-test-data` and set the variable +accordingly. +* `DATA_GEN_PYTHON` - required - path to a Python executable that can be +used to generate benchmark test objects/files; see +[Data generation](#data-generation). The benchmark runner sets this +automatically, but will defer to any value already set in the shell. Note that +[Mule](https://github.com/metomi/mule) will be automatically installed into +this environment, and sometimes +[iris-test-data](https://github.com/SciTools/iris-test-data) (see +`OVERRIDE_TEST_DATA_REPOSITORY`). +* `BENCHMARK_DATA` - optional - path to a directory for benchmark synthetic +test data, which the benchmark scripts will create if it doesn't already +exist. 
Defaults to `/benchmarks/.data/` if not set. Note that some of
+the generated files, especially in the 'SPerf' suite, are many GB in size so
+plan accordingly.
+* `ON_DEMAND_BENCHMARKS` - optional - when set (to any value): benchmarks
+decorated with `@on_demand_benchmark` are included in the ASV run. Usually
+coupled with the ASV `--bench` argument to only run the benchmark(s) of
+interest. Is set during the benchmark runner `cperf` and `sperf` sub-commands.
+* `ASV_COMMIT_ENVS` - optional - instruct the
+[delegated environment management](#benchmark-environments) to create a
+dedicated environment for each commit being benchmarked when set (to any
+value). This means that benchmarking commits with different environment
+requirements will not be delayed by repeated environment setup - especially
+relevant given the [benchmark runner](bm_runner.py)'s use of
+[--interleave-rounds](https://asv.readthedocs.io/en/stable/commands.html?highlight=interleave-rounds#asv-run),
+or any time you know you will repeatedly benchmark the same commit. **NOTE:**
+Iris environments are large so this option can consume a lot of disk space.
+
+## Writing benchmarks
+
+[See the ASV docs](https://asv.readthedocs.io/) for full detail.
+
+### What benchmarks to write
+
+It is not possible to maintain a full suite of 'unit style' benchmarks:
+
+* Benchmarks take longer to run than tests.
+* Small benchmarks are more vulnerable to noise - they report a lot of false
+positive regressions.
+
+We therefore recommend writing benchmarks representing scripts or single
+operations that are likely to be run at the user level.
+
+The drawback of this approach: a reported regression is less likely to reveal
+the root cause (e.g. if a commit caused a regression in coordinate-creation
+time, but the only benchmark covering this was for file-loading). Be prepared
+for manual investigations; and consider committing any useful benchmarks as
+[on-demand benchmarks](#on-demand-benchmarks) for future developers to use.
+
+### Data generation
+**Important:** be sure not to use the benchmarking environment to generate any
+test objects/files, as this environment changes with each commit being
+benchmarked, creating inconsistent benchmark 'conditions'. The
+[generate_data](./benchmarks/generate_data/__init__.py) module offers a
+solution; read more detail there.
+
+### ASV re-run behaviour
+
+Note that ASV re-runs a benchmark multiple times between calls to its
+`setup()` routine. This is a problem for benchmarking certain Iris operations
+such as data realisation, since the data will no longer be lazy after the
+first run. Consider writing extra steps to restore objects' original state
+_within_ the benchmark itself.
+
+If adding steps to the benchmark will skew the result too much then re-running
+can be disabled by setting an attribute on the benchmark: `number = 1`. To
+maintain result accuracy this should be accompanied by increasing the number of
+repeats _between_ `setup()` calls using the `repeat` attribute.
+`warmup_time = 0` is also advisable since ASV performs independent re-runs to
+estimate run-time, and these will still be subject to the original problem.
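+
+For example, a data-realisation benchmark might combine these attributes
+along the following lines (a sketch only - the class name and cube size are
+illustrative):
+
+```python
+import dask.array as da
+from iris.cube import Cube
+
+
+class DataRealisation:
+    # Run the benchmark body only once per setup() call - the data is no
+    # longer lazy after the first realisation.
+    number = 1
+    # Compensate with extra repeats; setup() runs between each repeat.
+    repeat = (5, 30, 20.0)
+    # Warmup runs ignore ``number``, so would also realise the data.
+    warmup_time = 0.0
+
+    def setup(self):
+        self.cube = Cube(da.zeros((1000, 1000), chunks=(100, 100)))
+
+    def time_realise_data(self):
+        _ = self.cube.data
+```
+
+The `disable_repeat_between_setup` decorator in
+[benchmarks init](./benchmarks/__init__.py) applies exactly this combination
+of attributes.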
+
+### Custom benchmarks
+
+Iris benchmarking implements custom benchmark types, such as a `tracemalloc`
+benchmark to measure memory growth. See [custom_bms/](./custom_bms) for more
+detail.
+
+### Scaling / non-Scaling Performance Differences
+
+**(We no longer advocate the below for benchmarks run during CI, given the
+limited available runtime and risk of false positives. It remains useful for
+manual investigations.)**
+
+When comparing performance between commits, file types, or any other
+variable, it can be helpful to know if the differences exist in scaling or
+non-scaling parts of the Iris functionality in question. This can be done
+using a size parameter, setting one value to be as small as possible (e.g. a
+scalar `Cube`), and the other to be significantly larger (e.g. a 1000x1000
+`Cube`). Performance differences might only be seen for the larger value, or
+the smaller, or both, getting you closer to the root cause.
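+
+A sketch of the pattern (the class name, operation and sizes are
+illustrative only):
+
+```python
+import numpy as np
+
+from iris.cube import Cube
+
+
+class Addition:
+    # One as-small-as-possible case and one significantly larger case,
+    # to reveal whether a difference lies in scaling or fixed costs.
+    params = [[1, 1000]]
+    param_names = ["cube edge length"]
+
+    def setup(self, edge_length):
+        data = np.zeros((edge_length, edge_length), dtype=np.float32)
+        self.cube = Cube(data)
+
+    def time_add(self, edge_length):
+        _ = self.cube + self.cube
+```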
+
+### On-demand benchmarks
+
+Some benchmarks provide useful insight but are inappropriate to be included in
+a benchmark run by default, e.g. those with long run-times or requiring a local
+file. These benchmarks should be decorated with `@on_demand_benchmark`
+(see [benchmarks init](./benchmarks/__init__.py)), which
+sets the benchmark to only be included in a run when the `ON_DEMAND_BENCHMARKS`
+environment variable is set. Examples include the CPerf and SPerf benchmark
+suites for the UK Met Office NG-VAT project.
+
+## Benchmark environments
+
+We have disabled ASV's standard environment management, instead using an
+environment built using the same Nox scripts as Iris' test environments. This
+is done using ASV's plugin architecture - see
+[asv_delegated.py](asv_delegated.py) and the extra config items in
+[asv.conf.json](asv.conf.json).
+
+(ASV is written to control the environment(s) that benchmarks are run in -
+minimising external factors and also allowing it to compare between a matrix
+of dependencies (each in a separate environment). We have chosen to sacrifice
+these features in favour of testing each commit with its intended dependencies,
+controlled by Nox + lock-files).
diff --git a/benchmarks/asv.conf.json b/benchmarks/asv.conf.json
new file mode 100644
index 0000000000..2857c90ad7
--- /dev/null
+++ b/benchmarks/asv.conf.json
@@ -0,0 +1,64 @@
+{
+    "version": 1,
+    "project": "scitools-iris",
+    "project_url": "https://github.com/SciTools/iris",
+    "repo": "..",
+    "environment_type": "delegated",
+    "show_commit_url": "https://github.com/scitools/iris/commit/",
+    "branches": ["upstream/main"],
+
+    "benchmark_dir": "./benchmarks",
+    "env_dir": ".asv/env",
+    "results_dir": ".asv/results",
+    "html_dir": ".asv/html",
+    "plugins": [".asv_delegated"],
+
+    "delegated_env_commands_comment": [
+        "The command(s) that create/update an environment correctly for the",
+        "checked-out commit. Command(s) format follows `build_command`:",
+        "    https://asv.readthedocs.io/en/stable/asv.conf.json.html#build-command-install-command-uninstall-command",
+
+        "The commit key indicates the earliest commit where the command(s)",
+        "will work.",
+
+        "Differences from `build_command`:",
+        "    * See: https://asv.readthedocs.io/en/stable/asv.conf.json.html#build-command-install-command-uninstall-command",
+        "    * Env vars limited to those set outside build time.",
+        "      (e.g. `{conf_dir}` available but `{build_dir}` not)",
+        "    * Run in the same environment as the ASV install itself.",
+
+        "Mandatory format for the first 'command' within each commit:",
+        "    * `ENV_PARENT=path/to/parent/directory/of/env-directory`",
+        "    * Can contain env vars (e.g. `{conf_dir}`)",
+        "    * `ENV_PARENT` available as `{env_parent}` in subsequent commands",
+        "    * The environment will be detected as the most recently updated",
+        "      environment in `{env_parent}`."
+
+    ],
+    "delegated_env_commands": {
+        "c8a663a0": [
+            "ENV_PARENT={conf_dir}/.asv/env/nox312",
+            "PY_VER=3.12 nox --envdir={env_parent} --session=tests --install-only --no-error-on-external-run --verbose"
+        ],
+        "d58fca7e": [
+            "ENV_PARENT={conf_dir}/.asv/env/nox311",
+            "PY_VER=3.11 nox --envdir={env_parent} --session=tests --install-only --no-error-on-external-run --verbose"
+        ],
+        "44fae030": [
+            "ENV_PARENT={conf_dir}/.asv/env/nox310",
+            "PY_VER=3.10 nox --envdir={env_parent} --session=tests --install-only --no-error-on-external-run --verbose"
+        ]
+    },
+
+    "command_comment": [
+        "We know that the Nox command takes care of installation in each",
+        "environment, and in the case of Iris no specialised uninstall or",
+        "build commands are needed to get it working.",
+
+        "We do however need to install the custom benchmarks for them to be",
+        "usable."
+    ],
+    "install_command": [],
+    "uninstall_command": [],
+    "build_command": ["python {conf_dir}/custom_bms/install.py"]
+}
diff --git a/benchmarks/asv_delegated.py b/benchmarks/asv_delegated.py
new file mode 100644
index 0000000000..fa5312d392
--- /dev/null
+++ b/benchmarks/asv_delegated.py
@@ -0,0 +1,350 @@
+# Copyright Iris contributors
+#
+# This file is part of Iris and is released under the BSD license.
+# See LICENSE in the root of the repository for full licensing details.
+"""ASV plug-in providing an alternative :class:`asv.environments.Environment` subclass.
+
+Preps an environment via custom user scripts, then uses that as the
+benchmarking environment.
+
+"""
+
+from contextlib import contextmanager, suppress
+from os import environ
+from os.path import getmtime
+from pathlib import Path
+import sys
+
+from asv import util as asv_util
+from asv.console import log
+from asv.environment import Environment, EnvironmentUnavailable
+from asv.repo import Repo
+from asv.util import ProcessError
+
+
+class EnvPrepCommands:
+    """A container for the environment preparation commands for a given commit.
+
+    Designed to read a value from the `delegated_env_commands` in the ASV
+    config, and validate that the command(s) are structured correctly.
+    """
+
+    ENV_PARENT_VAR = "ENV_PARENT"
+    env_parent: Path
+    commands: list[str]
+
+    def __init__(self, environment: Environment, raw_commands: tuple[str]):
+        env_var = self.ENV_PARENT_VAR
+        raw_commands_list = list(raw_commands)
+
+        (first_command,) = environment._interpolate_commands(raw_commands_list[0])
+        env: dict
+        command, env, return_codes, cwd = first_command
+
+        valid = command == []
+        valid = valid and return_codes == {0}
+        valid = valid and cwd is None
+        valid = valid and list(env.keys()) == [env_var]
+        if not valid:
+            message = (
+                "First command MUST ONLY "
+                f"define the {env_var} env var, with no command e.g: "
+                f"`{env_var}=foo/`. 
Got: \n {raw_commands_list[0]}" + ) + raise ValueError(message) + + self.env_parent = Path(env[env_var]).resolve() + self.commands = raw_commands_list[1:] + + +class CommitFinder(dict[str, EnvPrepCommands]): + """A specialised dict for finding the appropriate env prep script for a commit.""" + + def __call__(self, repo: Repo, commit_hash: str): + """Return the latest env prep script that is earlier than the given commit.""" + + def validate_commit(commit: str, is_lookup: bool) -> None: + try: + _ = repo.get_date(commit) + except ProcessError: + if is_lookup: + message_start = "Lookup commit" + else: + message_start = "Requested commit" + repo_path = getattr(repo, "_path", "unknown") + message = f"{message_start}: {commit} not found in repo: {repo_path}" + raise KeyError(message) + + for lookup in self.keys(): + validate_commit(lookup, is_lookup=True) + validate_commit(commit_hash, is_lookup=False) + + def parent_distance(parent_hash: str) -> int: + range_spec = repo.get_range_spec(parent_hash, commit_hash) + parents = repo.get_hashes_from_range(range_spec) + + if parent_hash[:8] == commit_hash[:8]: + distance = 0 + elif len(parents) == 0: + distance = -1 + else: + distance = len(parents) + return distance + + parentage = {commit: parent_distance(commit) for commit in self.keys()} + parentage = {k: v for k, v in parentage.items() if v >= 0} + if len(parentage) == 0: + message = f"No env prep script available for commit: {commit_hash} ." + raise KeyError(message) + else: + parentage = dict(sorted(parentage.items(), key=lambda item: item[1])) + commit = next(iter(parentage)) + content = self[commit] + return content + + +class Delegated(Environment): + """Manage a benchmark environment using custom user scripts, run at each commit. + + Ignores user input variations - ``matrix`` / ``pythons`` / + ``exclude``, since environment is being managed outside ASV. + + A vanilla :class:`asv.environment.Environment` is created for containing + the expected ASV configuration files and checked-out project. The actual + 'functional' environment is created/updated using the command(s) specified + in the config ``delegated_env_commands``, then the location is recorded via + a symlink within the ASV environment. The symlink is used as the + environment path used for any executable calls (e.g. + ``python my_script.py``). + + """ + + tool_name = "delegated" + """Required by ASV as a unique identifier of the environment type.""" + + DELEGATED_LINK_NAME = "delegated_env" + """The name of the symlink to the delegated environment.""" + + COMMIT_ENVS_VAR = "ASV_COMMIT_ENVS" + """Env var that instructs a dedicated environment be created per commit.""" + + def __init__(self, conf, python, requirements, tagged_env_vars): + """Get a 'delegated' environment based on the given ASV config object. + + Parameters + ---------- + conf : dict + ASV configuration object. + + python : str + Ignored - environment management is delegated. The value is always + ``DELEGATED``. + + requirements : dict (str -> str) + Ignored - environment management is delegated. The value is always + an empty dict. + + tagged_env_vars : dict (tag, key) -> value + Ignored - environment management is delegated. The value is always + an empty dict. + + Raises + ------ + EnvironmentUnavailable + The original environment or delegated environment cannot be created. 
+ + """ + ignored = [] + if python: + ignored.append(f"{python=}") + if requirements: + ignored.append(f"{requirements=}") + if tagged_env_vars: + ignored.append(f"{tagged_env_vars=}") + message = ( + f"Ignoring ASV setting(s): {', '.join(ignored)}. Benchmark " + "environment management is delegated to third party script(s)." + ) + log.warning(message) + self._python = "DELEGATED" + self._requirements = {} + self._tagged_env_vars = {} + super().__init__( + conf, + self._python, + self._requirements, + self._tagged_env_vars, + ) + + self._path_undelegated = Path(self._path) + """Preserves the 'true' path of the environment so that self._path can + be safely modified and restored.""" + + env_commands = getattr(conf, "delegated_env_commands") + try: + env_prep_commands = { + commit: EnvPrepCommands(self, commands) + for commit, commands in env_commands.items() + } + except ValueError as err: + message = f"Problem handling `delegated_env_commands`:\n{err}" + log.error(message) + raise EnvironmentUnavailable(message) + self._env_prep_lookup = CommitFinder(**env_prep_commands) + """An object that can be called downstream to get the appropriate + env prep script for a given repo and commit.""" + + @property + def _path_delegated(self) -> Path: + """The path of the symlink to the delegated environment.""" + return self._path_undelegated / self.DELEGATED_LINK_NAME + + @property + def _delegated_found(self) -> bool: + """Whether self._path_delegated successfully resolves to a directory.""" + resolved = None + with suppress(FileNotFoundError): + resolved = self._path_delegated.resolve(strict=True) + result = resolved is not None and resolved.is_dir() + return result + + def _symlink_to_delegated(self, delegated_env_path: Path) -> None: + """Create the symlink to the delegated environment.""" + self._path_delegated.unlink(missing_ok=True) + self._path_delegated.parent.mkdir(parents=True, exist_ok=True) + self._path_delegated.symlink_to(delegated_env_path, target_is_directory=True) + assert self._delegated_found + + def _setup(self): + """Temporarily try to set the user's active env as the delegated env. + + Environment prep will be run anyway once ASV starts checking out + commits, but this step tries to provide a usable environment (with + python, etc.) at the moment that ASV expects it. + + """ + current_env = Path(sys.executable).parents[1] + message = ( + "Temporarily using user's active environment as benchmarking " + f"environment: {current_env} . " + ) + try: + self._symlink_to_delegated(current_env) + _ = self.find_executable("python") + except Exception: + message = ( + f"Delegated environment {self.name} not yet set up (unable to " + "determine current environment)." + ) + self._path_delegated.unlink(missing_ok=True) + + message += "Correct environment will be set up at the first commit checkout." 
+ log.warning(message) + + def _prep_env(self, repo: Repo, commit_hash: str) -> None: + """Prepare the delegated environment for the given commit hash.""" + message = ( + f"Running delegated environment management for: {self.name} " + f"at commit: {commit_hash[:8]}" + ) + log.info(message) + + env_prep: EnvPrepCommands + try: + env_prep = self._env_prep_lookup(repo, commit_hash) + except KeyError as err: + message = f"Problem finding env prep commands: {err}" + log.error(message) + raise EnvironmentUnavailable(message) + + new_env_per_commit = self.COMMIT_ENVS_VAR in environ + if new_env_per_commit: + env_parent = env_prep.env_parent / commit_hash[:8] + else: + env_parent = env_prep.env_parent + + # See :meth:`Environment._interpolate_commands`. + # All ASV-namespaced env vars are available in the below format when + # interpolating commands: + # ASV_FOO_BAR = {foo_bar} + # We want the env parent path to be one of those available. + global_key = f"ASV_{EnvPrepCommands.ENV_PARENT_VAR}" + self._global_env_vars[global_key] = str(env_parent) + + # The project checkout. + build_dir = Path(self._build_root) / self._repo_subdir + + # Run the script(s) for delegated environment creation/updating. + # (An adaptation of :meth:`Environment._interpolate_and_run_commands`). + for command, env, return_codes, cwd in self._interpolate_commands( + env_prep.commands + ): + local_envs = dict(environ) + local_envs.update(env) + if cwd is None: + cwd = str(build_dir) + _ = asv_util.check_output( + command, + timeout=self._install_timeout, + cwd=cwd, + env=local_envs, + valid_return_codes=return_codes, + ) + + # Find the environment created/updated by running env_prep.commands. + # The most recently updated directory in env_parent. + delegated_env_path = sorted( + env_parent.glob("*"), + key=getmtime, + reverse=True, + )[0] + # Record the environment's path via a symlink within this environment. + self._symlink_to_delegated(delegated_env_path) + + message = f"Environment {self.name} updated to spec at {commit_hash[:8]}" + log.info(message) + + def checkout_project(self, repo: Repo, commit_hash: str) -> None: + """Check out the working tree of the project at given commit hash.""" + super().checkout_project(repo, commit_hash) + self._prep_env(repo, commit_hash) + + @contextmanager + def _delegate_path(self): + """Context manager to use the delegated env path as this env's path.""" + if not self._delegated_found: + message = f"Delegated environment not found at: {self._path_delegated}" + log.error(message) + raise EnvironmentUnavailable(message) + + try: + self._path = str(self._path_delegated) + yield + finally: + self._path = str(self._path_undelegated) + + def find_executable(self, executable): + """Find an executable (e.g. python, pip) in the DELEGATED environment. + + Raises + ------ + OSError + If the executable is not found in the environment. + """ + if not self._delegated_found: + # Required during environment setup. OSError expected if executable + # not found. + raise OSError + + with self._delegate_path(): + return super().find_executable(executable) + + def run_executable(self, executable, args, **kwargs): + """Run a given executable (e.g. python, pip) in the DELEGATED environment.""" + with self._delegate_path(): + return super().run_executable(executable, args, **kwargs) + + def run(self, args, **kwargs): + # This is not a specialisation - just implementing the abstract method. 
+ log.debug(f"Running '{' '.join(args)}' in {self.name}") + return self.run_executable("python", args, **kwargs) diff --git a/benchmarks/benchmarks/__init__.py b/benchmarks/benchmarks/__init__.py new file mode 100644 index 0000000000..30a991a879 --- /dev/null +++ b/benchmarks/benchmarks/__init__.py @@ -0,0 +1,55 @@ +# Copyright Iris contributors +# +# This file is part of Iris and is released under the BSD license. +# See LICENSE in the root of the repository for full licensing details. +"""Common code for benchmarks.""" + +from os import environ +import tracemalloc + +import numpy as np + + +def disable_repeat_between_setup(benchmark_object): + """Benchmark where object persistence would be inappropriate (decorator). + + E.g: + + * Benchmarking data realisation + * Benchmarking Cube coord addition + + Can be applied to benchmark classes/methods/functions. + + https://asv.readthedocs.io/en/stable/benchmarks.html#timing-benchmarks + + """ + # Prevent repeat runs between setup() runs - object(s) will persist after 1st. + benchmark_object.number = 1 + # Compensate for reduced certainty by increasing number of repeats. + # (setup() is run between each repeat). + # Minimum 5 repeats, run up to 30 repeats / 20 secs whichever comes first. + benchmark_object.repeat = (5, 30, 20.0) + # ASV uses warmup to estimate benchmark time before planning the real run. + # Prevent this, since object(s) will persist after first warmup run, + # which would give ASV misleading info (warmups ignore ``number``). + benchmark_object.warmup_time = 0.0 + + return benchmark_object + + +def on_demand_benchmark(benchmark_object): + """Disable these benchmark(s) unless ON_DEMAND_BENCHARKS env var is set. + + This is a decorator. + + For benchmarks that, for whatever reason, should not be run by default. + E.g: + + * Require a local file + * Used for scalability analysis instead of commit monitoring. + + Can be applied to benchmark classes/methods/functions. + + """ + if "ON_DEMAND_BENCHMARKS" in environ: + return benchmark_object diff --git a/benchmarks/benchmarks/aggregate_collapse.py b/benchmarks/benchmarks/aggregate_collapse.py new file mode 100644 index 0000000000..4d5d2923bc --- /dev/null +++ b/benchmarks/benchmarks/aggregate_collapse.py @@ -0,0 +1,212 @@ +# Copyright Iris contributors +# +# This file is part of Iris and is released under the BSD license. +# See LICENSE in the root of the repository for full licensing details. 
+"""Benchmarks relating to :meth:`iris.cube.CubeList.merge` and ``concatenate``.""" + +import warnings + +import numpy as np + +from iris import analysis, coords, cube +from iris.warnings import IrisVagueMetadataWarning + +from .generate_data.stock import realistic_4d_w_everything + + +class AggregationMixin: + params = [[False, True]] + param_names = ["Lazy operations"] + + def setup(self, lazy_run: bool): + warnings.filterwarnings("ignore", message="Ignoring a datum") + warnings.filterwarnings("ignore", category=IrisVagueMetadataWarning) + cube = realistic_4d_w_everything(lazy=lazy_run) + + for cm in cube.cell_measures(): + cube.remove_cell_measure(cm) + for av in cube.ancillary_variables(): + cube.remove_ancillary_variable(av) + + agg_mln_data = np.arange(0, 70, 10) + agg_mln_repeat = np.repeat(agg_mln_data, 10) + + cube = cube[..., :10, :10] + + self.mln_aux = "aggregatable" + self.mln = "model_level_number" + agg_mln_coord = coords.AuxCoord(points=agg_mln_repeat, long_name=self.mln_aux) + + if lazy_run: + agg_mln_coord.points = agg_mln_coord.lazy_points() + cube.add_aux_coord(agg_mln_coord, 1) + self.cube = cube + + +class Aggregation(AggregationMixin): + def time_aggregated_by_MEAN(self, _): + _ = self.cube.aggregated_by(self.mln_aux, analysis.MEAN).data + + def time_aggregated_by_COUNT(self, _): + _ = self.cube.aggregated_by( + self.mln_aux, analysis.COUNT, function=lambda values: values > 280 + ).data + + def time_aggregated_by_GMEAN(self, _): + _ = self.cube.aggregated_by(self.mln_aux, analysis.GMEAN).data + + def time_aggregated_by_HMEAN(self, _): + _ = self.cube.aggregated_by(self.mln_aux, analysis.HMEAN).data + + def time_aggregated_by_MAX_RUN(self, _): + _ = self.cube.aggregated_by( + self.mln_aux, analysis.MAX_RUN, function=lambda values: values > 280 + ).data + + def time_aggregated_by_MAX(self, _): + _ = self.cube.aggregated_by(self.mln_aux, analysis.MAX).data + + def time_aggregated_by_MEDIAN(self, _): + _ = self.cube.aggregated_by(self.mln_aux, analysis.MEDIAN).data + + def time_aggregated_by_MIN(self, _): + _ = self.cube.aggregated_by(self.mln_aux, analysis.MIN).data + + def time_aggregated_by_PEAK(self, _): + _ = self.cube.aggregated_by(self.mln_aux, analysis.PEAK).data + + def time_aggregated_by_PERCENTILE(self, _): + _ = self.cube.aggregated_by( + self.mln_aux, analysis.PERCENTILE, percent=[10, 50, 90] + ).data + + def time_aggregated_by_FAST_PERCENTILE(self, _): + _ = self.cube.aggregated_by( + self.mln_aux, + analysis.PERCENTILE, + mdtol=0, + percent=[10, 50, 90], + fast_percentile_method=True, + ).data + + def time_aggregated_by_PROPORTION(self, _): + _ = self.cube.aggregated_by( + self.mln_aux, + analysis.PROPORTION, + function=lambda values: values > 280, + ).data + + def time_aggregated_by_STD_DEV(self, _): + _ = self.cube.aggregated_by(self.mln_aux, analysis.STD_DEV).data + + def time_aggregated_by_VARIANCE(self, _): + _ = self.cube.aggregated_by(self.mln_aux, analysis.VARIANCE).data + + def time_aggregated_by_RMS(self, _): + _ = self.cube.aggregated_by(self.mln_aux, analysis.RMS).data + + def time_collapsed_by_MEAN(self, _): + _ = self.cube.collapsed(self.mln, analysis.MEAN).data + + def time_collapsed_by_COUNT(self, _): + _ = self.cube.collapsed( + self.mln, analysis.COUNT, function=lambda values: values > 280 + ).data + + def time_collapsed_by_GMEAN(self, _): + _ = self.cube.collapsed(self.mln, analysis.GMEAN).data + + def time_collapsed_by_HMEAN(self, _): + _ = self.cube.collapsed(self.mln, analysis.HMEAN).data + + def time_collapsed_by_MAX_RUN(self, _): + 
_ = self.cube.collapsed( + self.mln, analysis.MAX_RUN, function=lambda values: values > 280 + ).data + + def time_collapsed_by_MAX(self, _): + _ = self.cube.collapsed(self.mln, analysis.MAX).data + + def time_collapsed_by_MEDIAN(self, _): + _ = self.cube.collapsed(self.mln, analysis.MEDIAN).data + + def time_collapsed_by_MIN(self, _): + _ = self.cube.collapsed(self.mln, analysis.MIN).data + + def time_collapsed_by_PEAK(self, _): + _ = self.cube.collapsed(self.mln, analysis.PEAK).data + + def time_collapsed_by_PERCENTILE(self, _): + _ = self.cube.collapsed( + self.mln, analysis.PERCENTILE, percent=[10, 50, 90] + ).data + + def time_collapsed_by_FAST_PERCENTILE(self, _): + _ = self.cube.collapsed( + self.mln, + analysis.PERCENTILE, + mdtol=0, + percent=[10, 50, 90], + fast_percentile_method=True, + ).data + + def time_collapsed_by_PROPORTION(self, _): + _ = self.cube.collapsed( + self.mln, analysis.PROPORTION, function=lambda values: values > 280 + ).data + + def time_collapsed_by_STD_DEV(self, _): + _ = self.cube.collapsed(self.mln, analysis.STD_DEV).data + + def time_collapsed_by_VARIANCE(self, _): + _ = self.cube.collapsed(self.mln, analysis.VARIANCE).data + + def time_collapsed_by_RMS(self, _): + _ = self.cube.collapsed(self.mln, analysis.RMS).data + + +class WeightedAggregation(AggregationMixin): + def setup(self, lazy_run): + super().setup(lazy_run) + + weights = np.linspace(0, 1, 70) + weights = np.broadcast_to(weights, self.cube.shape[:2]) + weights = np.broadcast_to(weights.T, self.cube.shape[::-1]) + weights = weights.T + + self.weights = weights + + ## currently has problems with indexing weights + # def time_w_aggregated_by_WPERCENTILE(self, _): + # _ = self.cube.aggregated_by( + # self.mln_aux, analysis.WPERCENTILE, weights=self.weights, percent=[10, 50, 90] + # ).data + + def time_w_aggregated_by_SUM(self, _): + _ = self.cube.aggregated_by( + self.mln_aux, analysis.SUM, weights=self.weights + ).data + + def time_w_aggregated_by_RMS(self, _): + _ = self.cube.aggregated_by( + self.mln_aux, analysis.RMS, weights=self.weights + ).data + + def time_w_aggregated_by_MEAN(self, _): + _ = self.cube.aggregated_by( + self.mln_aux, analysis.MEAN, weights=self.weights + ).data + + def time_w_collapsed_by_WPERCENTILE(self, _): + _ = self.cube.collapsed( + self.mln, analysis.WPERCENTILE, weights=self.weights, percent=[10, 50, 90] + ).data + + def time_w_collapsed_by_SUM(self, _): + _ = self.cube.collapsed(self.mln, analysis.SUM, weights=self.weights).data + + def time_w_collapsed_by_RMS(self, _): + _ = self.cube.collapsed(self.mln, analysis.RMS, weights=self.weights).data + + def time_w_collapsed_by_MEAN(self, _): + _ = self.cube.collapsed(self.mln, analysis.MEAN, weights=self.weights).data diff --git a/benchmarks/benchmarks/cperf/__init__.py b/benchmarks/benchmarks/cperf/__init__.py new file mode 100644 index 0000000000..05a086bc44 --- /dev/null +++ b/benchmarks/benchmarks/cperf/__init__.py @@ -0,0 +1,92 @@ +# Copyright Iris contributors +# +# This file is part of Iris and is released under the BSD license. +# See LICENSE in the root of the repository for full licensing details. +"""Benchmarks for the CPerf scheme of the UK Met Office's NG-VAT project. + +CPerf = comparing performance working with data in UM versus LFRic formats. 
+ +Files available from the UK Met Office: + moo ls moose:/adhoc/projects/avd/asv/data_for_nightly_tests/ +""" + +import numpy as np + +from iris import load_cube + +from ..generate_data import BENCHMARK_DATA +from ..generate_data.ugrid import make_cubesphere_testfile + +# The data of the core test UM files has dtype=np.float32 shape=(1920, 2560) +_UM_DIMS_YX = (1920, 2560) +# The closest cubesphere size in terms of datapoints is sqrt(1920*2560 / 6) +# This gives ~= 905, i.e. "C905" +_N_CUBESPHERE_UM_EQUIVALENT = int(np.sqrt(np.prod(_UM_DIMS_YX) / 6)) + + +class SingleDiagnosticMixin: + """For use in any benchmark classes that work on a single diagnostic file.""" + + params = [ + ["LFRic", "UM", "UM_lbpack0", "UM_netcdf"], + [False, True], + [False, True], + ] + param_names = ["file type", "height dim (len 71)", "time dim (len 3)"] + + def setup(self, file_type, three_d, three_times): + if file_type == "LFRic": + # Generate an appropriate synthetic LFRic file. + if three_times: + n_times = 3 + else: + n_times = 1 + + # Use a cubesphere size ~equivalent to our UM test data. + cells_per_panel_edge = _N_CUBESPHERE_UM_EQUIVALENT + create_kwargs = dict(c_size=cells_per_panel_edge, n_times=n_times) + + if three_d: + create_kwargs["n_levels"] = 71 + + # Will reuse a file if already present. + file_path = make_cubesphere_testfile(**create_kwargs) + + else: + # Locate the appropriate UM file. + if three_times: + # pa/pb003 files + numeric = "003" + else: + # pa/pb000 files + numeric = "000" + + if three_d: + # theta diagnostic, N1280 file w/ 71 levels (1920, 2560, 71) + file_name = f"umglaa_pb{numeric}-theta" + else: + # surface_temp diagnostic, N1280 file (1920, 2560) + file_name = f"umglaa_pa{numeric}-surfacetemp" + + file_suffices = { + "UM": "", # packed FF (WGDOS lbpack = 1) + "UM_lbpack0": ".uncompressed", # unpacked FF (lbpack = 0) + "UM_netcdf": ".nc", # UM file -> Iris -> NetCDF file + } + suffix = file_suffices[file_type] + + file_path = (BENCHMARK_DATA / file_name).with_suffix(suffix) + if not file_path.exists(): + message = "\n".join( + [ + f"Expected local file not found: {file_path}", + "Available from the UK Met Office.", + ] + ) + raise FileNotFoundError(message) + + self.file_path = file_path + self.file_type = file_type + + def load(self): + return load_cube(str(self.file_path)) diff --git a/benchmarks/benchmarks/cperf/equality.py b/benchmarks/benchmarks/cperf/equality.py new file mode 100644 index 0000000000..ffe61ef938 --- /dev/null +++ b/benchmarks/benchmarks/cperf/equality.py @@ -0,0 +1,55 @@ +# Copyright Iris contributors +# +# This file is part of Iris and is released under the BSD license. +# See LICENSE in the root of the repository for full licensing details. +"""Equality benchmarks for the CPerf scheme of the UK Met Office's NG-VAT project.""" + +from .. import on_demand_benchmark +from . import SingleDiagnosticMixin + + +class EqualityMixin(SingleDiagnosticMixin): + r"""Use :class:`SingleDiagnosticMixin` as the realistic case. + + Uses :class:`SingleDiagnosticMixin` as the realistic case will be comparing + :class:`~iris.cube.Cube`\\ s that have been loaded from file. + + """ + + # Cut down the parent parameters. 
+ params = [["LFRic", "UM"]] + + def setup(self, file_type, three_d=False, three_times=False): + super().setup(file_type, three_d, three_times) + self.cube = self.load() + self.other_cube = self.load() + + +@on_demand_benchmark +class CubeEquality(EqualityMixin): + r"""Benchmark time & memory costs of comparing LFRic & UM :class:`~iris.cube.Cube`\\ s.""" + + def _comparison(self): + _ = self.cube == self.other_cube + + def peakmem_eq(self, file_type): + self._comparison() + + def time_eq(self, file_type): + self._comparison() + + +@on_demand_benchmark +class MeshEquality(EqualityMixin): + """Provides extra context for :class:`CubeEquality`.""" + + params = [["LFRic"]] + + def _comparison(self): + _ = self.cube.mesh == self.other_cube.mesh + + def peakmem_eq(self, file_type): + self._comparison() + + def time_eq(self, file_type): + self._comparison() diff --git a/benchmarks/benchmarks/cperf/load.py b/benchmarks/benchmarks/cperf/load.py new file mode 100644 index 0000000000..07c2de9e79 --- /dev/null +++ b/benchmarks/benchmarks/cperf/load.py @@ -0,0 +1,55 @@ +# Copyright Iris contributors +# +# This file is part of Iris and is released under the BSD license. +# See LICENSE in the root of the repository for full licensing details. +"""File loading benchmarks for the CPerf scheme of the UK Met Office's NG-VAT project.""" + +from .. import on_demand_benchmark +from . import SingleDiagnosticMixin + + +@on_demand_benchmark +class SingleDiagnosticLoad(SingleDiagnosticMixin): + def time_load(self, _, __, ___): + """Perform a 'real world comparison'. + + * UM coords are always realised (DimCoords). + * LFRic coords are not realised by default (MeshCoords). + + """ + cube = self.load() + assert cube.has_lazy_data() + # UM files load lon/lat as DimCoords, which are always realised. + expecting_lazy_coords = self.file_type == "LFRic" + for coord_name in "longitude", "latitude": + coord = cube.coord(coord_name) + assert coord.has_lazy_points() == expecting_lazy_coords + assert coord.has_lazy_bounds() == expecting_lazy_coords + + def time_load_w_realised_coords(self, _, __, ___): + """Valuable extra comparison where both UM and LFRic coords are realised.""" + cube = self.load() + for coord_name in "longitude", "latitude": + coord = cube.coord(coord_name) + # Don't touch actual points/bounds objects - permanent + # realisation plays badly with ASV's re-run strategy. + if coord.has_lazy_points(): + coord.core_points().compute() + if coord.has_lazy_bounds(): + coord.core_bounds().compute() + + +@on_demand_benchmark +class SingleDiagnosticRealise(SingleDiagnosticMixin): + # The larger files take a long time to realise. + timeout = 600.0 + + def setup(self, file_type, three_d, three_times): + super().setup(file_type, three_d, three_times) + self.loaded_cube = self.load() + + def time_realise(self, _, __, ___): + # Don't touch loaded_cube.data - permanent realisation plays badly with + # ASV's re-run strategy. + assert self.loaded_cube.has_lazy_data() + self.loaded_cube.core_data().compute() diff --git a/benchmarks/benchmarks/cperf/save.py b/benchmarks/benchmarks/cperf/save.py new file mode 100644 index 0000000000..6dcd0b3bcf --- /dev/null +++ b/benchmarks/benchmarks/cperf/save.py @@ -0,0 +1,40 @@ +# Copyright Iris contributors +# +# This file is part of Iris and is released under the BSD license. +# See LICENSE in the root of the repository for full licensing details. +"""File saving benchmarks for the CPerf scheme of the UK Met Office's NG-VAT project.""" + +from iris import save + +from .. 
import on_demand_benchmark +from ..generate_data.ugrid import make_cube_like_2d_cubesphere, make_cube_like_umfield +from . import _N_CUBESPHERE_UM_EQUIVALENT, _UM_DIMS_YX + + +@on_demand_benchmark +class NetcdfSave: + """Benchmark time and memory costs of saving ~large-ish data cubes to netcdf. + + Parametrised by file type. + + """ + + params = ["LFRic", "UM"] + param_names = ["data type"] + + def setup(self, data_type): + if data_type == "LFRic": + self.cube = make_cube_like_2d_cubesphere( + n_cube=_N_CUBESPHERE_UM_EQUIVALENT, with_mesh=True + ) + else: + self.cube = make_cube_like_umfield(_UM_DIMS_YX) + + def _save_data(self, cube): + save(cube, "tmp.nc") + + def time_save_data_netcdf(self, data_type): + self._save_data(self.cube) + + def tracemalloc_save_data_netcdf(self, data_type): + self._save_data(self.cube) diff --git a/benchmarks/benchmarks/cube.py b/benchmarks/benchmarks/cube.py new file mode 100644 index 0000000000..0b6829ee2d --- /dev/null +++ b/benchmarks/benchmarks/cube.py @@ -0,0 +1,116 @@ +# Copyright Iris contributors +# +# This file is part of Iris and is released under the BSD license. +# See LICENSE in the root of the repository for full licensing details. +"""Cube benchmark tests.""" + +from collections.abc import Iterable + +from iris import coords +from iris.cube import Cube + +from .generate_data.stock import realistic_4d_w_everything + + +class CubeCreation: + params = [[False, True], ["instantiate", "construct"]] + param_names = ["Cube has mesh", "Cube creation strategy"] + + cube_kwargs: dict + + def setup(self, w_mesh: bool, _) -> None: + # Loaded as two cubes due to the hybrid height. + source_cube = realistic_4d_w_everything(w_mesh=w_mesh) + + def get_coords_and_dims( + coords_iter: Iterable[coords._DimensionalMetadata], + ) -> list[tuple[coords._DimensionalMetadata, tuple[int, ...]]]: + return [(c, c.cube_dims(source_cube)) for c in coords_iter] + + self.cube_kwargs = dict( + data=source_cube.data, + standard_name=source_cube.standard_name, + long_name=source_cube.long_name, + var_name=source_cube.var_name, + units=source_cube.units, + attributes=source_cube.attributes, + cell_methods=source_cube.cell_methods, + dim_coords_and_dims=get_coords_and_dims(source_cube.dim_coords), + aux_coords_and_dims=get_coords_and_dims(source_cube.aux_coords), + aux_factories=source_cube.aux_factories, + cell_measures_and_dims=get_coords_and_dims(source_cube.cell_measures()), + ancillary_variables_and_dims=get_coords_and_dims( + source_cube.ancillary_variables() + ), + ) + + def time_create(self, _, cube_creation_strategy: str) -> None: + if cube_creation_strategy == "instantiate": + _ = Cube(**self.cube_kwargs) + + elif cube_creation_strategy == "construct": + new_cube = Cube(data=self.cube_kwargs["data"]) + new_cube.standard_name = self.cube_kwargs["standard_name"] + new_cube.long_name = self.cube_kwargs["long_name"] + new_cube.var_name = self.cube_kwargs["var_name"] + new_cube.units = self.cube_kwargs["units"] + new_cube.attributes = self.cube_kwargs["attributes"] + new_cube.cell_methods = self.cube_kwargs["cell_methods"] + for coord, dims in self.cube_kwargs["dim_coords_and_dims"]: + assert isinstance(coord, coords.DimCoord) # Type hint to help linters. 
+ new_cube.add_dim_coord(coord, dims) + for coord, dims in self.cube_kwargs["aux_coords_and_dims"]: + new_cube.add_aux_coord(coord, dims) + for aux_factory in self.cube_kwargs["aux_factories"]: + new_cube.add_aux_factory(aux_factory) + for cell_measure, dims in self.cube_kwargs["cell_measures_and_dims"]: + new_cube.add_cell_measure(cell_measure, dims) + for ancillary_variable, dims in self.cube_kwargs[ + "ancillary_variables_and_dims" + ]: + new_cube.add_ancillary_variable(ancillary_variable, dims) + + else: + message = f"Unknown cube creation strategy: {cube_creation_strategy}" + raise NotImplementedError(message) + + +class CubeEquality: + params = [ + [False, True], + [False, True], + ["metadata_inequality", "coord_inequality", "data_inequality", "all_equal"], + ] + param_names = ["Cubes are lazy", "Cubes have meshes", "Scenario"] + + cube_1: Cube + cube_2: Cube + coord_name = "surface_altitude" + + def setup(self, lazy: bool, w_mesh: bool, scenario: str) -> None: + self.cube_1 = realistic_4d_w_everything(w_mesh=w_mesh, lazy=lazy) + # Using Cube.copy() produces different results due to sharing of the + # Mesh instance. + self.cube_2 = realistic_4d_w_everything(w_mesh=w_mesh, lazy=lazy) + + match scenario: + case "metadata_inequality": + self.cube_2.long_name = "different" + case "coord_inequality": + coord = self.cube_2.coord(self.coord_name) + coord.points = coord.core_points() * 2 + case "data_inequality": + self.cube_2.data = self.cube_2.core_data() * 2 + case "all_equal": + pass + case _: + message = f"Unknown scenario: {scenario}" + raise NotImplementedError(message) + + def time_equality(self, lazy: bool, __, ___) -> None: + _ = self.cube_1 == self.cube_2 + if lazy: + for cube in (self.cube_1, self.cube_2): + # Confirm that this benchmark is safe for repetition. + assert cube.coord(self.coord_name).has_lazy_points() + assert cube.has_lazy_data() diff --git a/benchmarks/benchmarks/generate_data/__init__.py b/benchmarks/benchmarks/generate_data/__init__.py new file mode 100644 index 0000000000..bb53e26b2f --- /dev/null +++ b/benchmarks/benchmarks/generate_data/__init__.py @@ -0,0 +1,116 @@ +# Copyright Iris contributors +# +# This file is part of Iris and is released under the BSD license. +# See LICENSE in the root of the repository for full licensing details. +"""Scripts for generating supporting data for benchmarking. + +Data generated using Iris should use :func:`run_function_elsewhere`, which +means that data is generated using a fixed version of Iris and a fixed +environment, rather than those that get changed when the benchmarking run +checks out a new commit. + +Downstream use of data generated 'elsewhere' requires saving; usually in a +NetCDF file. Could also use pickling but there is a potential risk if the +benchmark sequence runs over two different Python versions. + +""" + +from contextlib import contextmanager +from inspect import getsource +from os import environ +from pathlib import Path +from subprocess import CalledProcessError, check_output, run +from textwrap import dedent +from warnings import warn + +from iris._lazy_data import as_concrete_data +from iris.fileformats import netcdf + +#: Python executable used by :func:`run_function_elsewhere`, set via env +#: variable of same name. Must be path of Python within an environment that +#: includes Iris (including dependencies and test modules) and Mule. 
+try: + DATA_GEN_PYTHON = environ["DATA_GEN_PYTHON"] + _ = check_output([DATA_GEN_PYTHON, "-c", "a = True"]) +except KeyError: + error = "Env variable DATA_GEN_PYTHON not defined." + raise KeyError(error) +except (CalledProcessError, FileNotFoundError, PermissionError): + error = "Env variable DATA_GEN_PYTHON not a runnable python executable path." + raise ValueError(error) + +# The default location of data files used in benchmarks. Used by CI. +default_data_dir = (Path(__file__).parents[2] / ".data").resolve() +# Optionally override the default data location with environment variable. +BENCHMARK_DATA = Path(environ.get("BENCHMARK_DATA", default_data_dir)) +if BENCHMARK_DATA == default_data_dir: + BENCHMARK_DATA.mkdir(exist_ok=True) + message = ( + f"No BENCHMARK_DATA env var, defaulting to {BENCHMARK_DATA}. " + "Note that some benchmark files are GB in size." + ) + warn(message) +elif not BENCHMARK_DATA.is_dir(): + message = f"Not a directory: {BENCHMARK_DATA} ." + raise ValueError(message) + +# Manual flag to allow the rebuilding of synthetic data. +# False forces a benchmark run to re-make all the data files. +REUSE_DATA = True + + +def run_function_elsewhere(func_to_run, *args, **kwargs): + """Run a given function using the :const:`DATA_GEN_PYTHON` executable. + + This structure allows the function to be written natively. + + Parameters + ---------- + func_to_run : FunctionType + The function object to be run. + NOTE: the function must be completely self-contained, i.e. perform all + its own imports (within the target :const:`DATA_GEN_PYTHON` + environment). + *args : tuple, optional + Function call arguments. Must all be expressible as simple literals, + i.e. the ``repr`` must be a valid literal expression. + **kwargs: dict, optional + Function call keyword arguments. All values must be expressible as + simple literals (see ``*args``). + + Returns + ------- + str + The ``stdout`` from the run. + + """ + func_string = dedent(getsource(func_to_run)) + func_string = func_string.replace("@staticmethod\n", "") + func_call_term_strings = [repr(arg) for arg in args] + func_call_term_strings += [f"{name}={repr(val)}" for name, val in kwargs.items()] + func_call_string = ( + f"{func_to_run.__name__}(" + ",".join(func_call_term_strings) + ")" + ) + python_string = "\n".join([func_string, func_call_string]) + result = run( + [DATA_GEN_PYTHON, "-c", python_string], capture_output=True, check=True + ) + return result.stdout + + +@contextmanager +def load_realised(): + """Force NetCDF loading with realised arrays. + + Since passing between data generation and benchmarking environments is via + file loading, but some benchmarks are only meaningful if starting with real + arrays. + """ + from iris.fileformats.netcdf.loader import _get_cf_var_data as pre_patched + + def patched(cf_var, filename): + return as_concrete_data(pre_patched(cf_var, filename)) + + netcdf._get_cf_var_data = patched + yield netcdf + netcdf._get_cf_var_data = pre_patched diff --git a/benchmarks/benchmarks/generate_data/stock.py b/benchmarks/benchmarks/generate_data/stock.py new file mode 100644 index 0000000000..04698e8ff5 --- /dev/null +++ b/benchmarks/benchmarks/generate_data/stock.py @@ -0,0 +1,183 @@ +# Copyright Iris contributors +# +# This file is part of Iris and is released under the BSD license. +# See LICENSE in the root of the repository for full licensing details. +"""Wrappers for using :mod:`iris.tests.stock` methods for benchmarking. + +See :mod:`benchmarks.generate_data` for an explanation of this structure. 
+""" + +from contextlib import nullcontext +from hashlib import sha256 +import json +from pathlib import Path + +import iris +from iris import cube +from iris.mesh import load_mesh + +from . import BENCHMARK_DATA, REUSE_DATA, load_realised, run_function_elsewhere + + +def hash_args(*args, **kwargs): + """Convert arguments into a short hash - for preserving args in filenames.""" + arg_string = str(args) + kwarg_string = json.dumps(kwargs) + full_string = arg_string + kwarg_string + return sha256(full_string.encode()).hexdigest()[:10] + + +def _create_file__xios_common(func_name, **kwargs): + def _external(func_name_, temp_file_dir, **kwargs_): + from iris.tests.stock import netcdf + + func = getattr(netcdf, func_name_) + print(func(temp_file_dir, **kwargs_), end="") + + args_hash = hash_args(**kwargs) + save_path = (BENCHMARK_DATA / f"{func_name}_{args_hash}").with_suffix(".nc") + if not REUSE_DATA or not save_path.is_file(): + # The xios functions take control of save location so need to move to + # a more specific name that allows reuse. + actual_path = run_function_elsewhere( + _external, + func_name_=func_name, + temp_file_dir=str(BENCHMARK_DATA), + **kwargs, + ) + Path(actual_path.decode()).replace(save_path) + return save_path + + +def create_file__xios_2d_face_half_levels( + temp_file_dir, dataset_name, n_faces=866, n_times=1 +): + """Create file wrapper for :meth:`iris.tests.stock.netcdf.create_file__xios_2d_face_half_levels`. + + Have taken control of temp_file_dir + + todo: is create_file__xios_2d_face_half_levels still appropriate now we can + properly save Mesh Cubes? + """ + return _create_file__xios_common( + func_name="create_file__xios_2d_face_half_levels", + dataset_name=dataset_name, + n_faces=n_faces, + n_times=n_times, + ) + + +def create_file__xios_3d_face_half_levels( + temp_file_dir, dataset_name, n_faces=866, n_times=1, n_levels=38 +): + """Create file wrapper for :meth:`iris.tests.stock.netcdf.create_file__xios_3d_face_half_levels`. + + Have taken control of temp_file_dir + + todo: is create_file__xios_3d_face_half_levels still appropriate now we can + properly save Mesh Cubes? + """ + return _create_file__xios_common( + func_name="create_file__xios_3d_face_half_levels", + dataset_name=dataset_name, + n_faces=n_faces, + n_times=n_times, + n_levels=n_levels, + ) + + +def sample_mesh(n_nodes=None, n_faces=None, n_edges=None, lazy_values=False): + """Sample mesh wrapper for :meth:iris.tests.stock.mesh.sample_mesh`.""" + + def _external(*args, **kwargs): + from iris.mesh import save_mesh + from iris.tests.stock.mesh import sample_mesh + + save_path_ = kwargs.pop("save_path") + # Always saving, so laziness is irrelevant. Use lazy to save time. + kwargs["lazy_values"] = True + new_mesh = sample_mesh(*args, **kwargs) + save_mesh(new_mesh, save_path_) + + arg_list = [n_nodes, n_faces, n_edges] + args_hash = hash_args(*arg_list) + save_path = (BENCHMARK_DATA / f"sample_mesh_{args_hash}").with_suffix(".nc") + if not REUSE_DATA or not save_path.is_file(): + _ = run_function_elsewhere(_external, *arg_list, save_path=str(save_path)) + if not lazy_values: + # Realise everything. + with load_realised(): + mesh = load_mesh(str(save_path)) + else: + mesh = load_mesh(str(save_path)) + return mesh + + +def sample_meshcoord(sample_mesh_kwargs=None, location="face", axis="x"): + """Sample meshcoord wrapper for :meth:`iris.tests.stock.mesh.sample_meshcoord`. 
+ + Parameters deviate from the original as cannot pass a + :class:`iris.mesh.Mesh to the separate Python instance - must + instead generate the Mesh as well. + + MeshCoords cannot be saved to file, so the _external method saves the + MeshCoord's Mesh, then the original Python instance loads in that Mesh and + regenerates the MeshCoord from there. + """ + + def _external(sample_mesh_kwargs_, save_path_): + from iris.mesh import save_mesh + from iris.tests.stock.mesh import sample_mesh, sample_meshcoord + + if sample_mesh_kwargs_: + input_mesh = sample_mesh(**sample_mesh_kwargs_) + else: + input_mesh = None + # Don't parse the location or axis arguments - only saving the Mesh at + # this stage. + new_meshcoord = sample_meshcoord(mesh=input_mesh) + save_mesh(new_meshcoord.mesh, save_path_) + + args_hash = hash_args(**sample_mesh_kwargs) + save_path = (BENCHMARK_DATA / f"sample_mesh_coord_{args_hash}").with_suffix(".nc") + if not REUSE_DATA or not save_path.is_file(): + _ = run_function_elsewhere( + _external, + sample_mesh_kwargs_=sample_mesh_kwargs, + save_path_=str(save_path), + ) + with load_realised(): + source_mesh = load_mesh(str(save_path)) + # Regenerate MeshCoord from its Mesh, which we saved. + return source_mesh.to_MeshCoord(location=location, axis=axis) + + +def realistic_4d_w_everything(w_mesh=False, lazy=False) -> iris.cube.Cube: + """Run :func:`iris.tests.stock.realistic_4d_w_everything` in ``DATA_GEN_PYTHON``. + + Parameters + ---------- + w_mesh : bool + See :func:`iris.tests.stock.realistic_4d_w_everything` for details. + lazy : bool + If True, the Cube will be returned with all arrays as they would + normally be loaded from file (i.e. most will still be lazy Dask + arrays). If False, all arrays will be realised NumPy arrays. + + """ + + def _external(w_mesh_: str, save_path_: str): + import iris + from iris.tests.stock import realistic_4d_w_everything + + cube = realistic_4d_w_everything(w_mesh=bool(w_mesh_)) + iris.save(cube, save_path_) + + save_path = (BENCHMARK_DATA / f"realistic_4d_w_everything_{w_mesh}").with_suffix( + ".nc" + ) + if not REUSE_DATA or not save_path.is_file(): + _ = run_function_elsewhere(_external, w_mesh_=w_mesh, save_path_=str(save_path)) + context = nullcontext() if lazy else load_realised() + with context: + return iris.load_cube(save_path, "air_potential_temperature") diff --git a/benchmarks/benchmarks/generate_data/ugrid.py b/benchmarks/benchmarks/generate_data/ugrid.py new file mode 100644 index 0000000000..2cef4752ee --- /dev/null +++ b/benchmarks/benchmarks/generate_data/ugrid.py @@ -0,0 +1,190 @@ +# Copyright Iris contributors +# +# This file is part of Iris and is released under the BSD license. +# See LICENSE in the root of the repository for full licensing details. +"""Scripts for generating supporting data for UGRID-related benchmarking.""" + +from iris import load_cube as iris_loadcube + +from . import BENCHMARK_DATA, REUSE_DATA, load_realised, run_function_elsewhere +from .stock import ( + create_file__xios_2d_face_half_levels, + create_file__xios_3d_face_half_levels, +) + + +def generate_cube_like_2d_cubesphere(n_cube: int, with_mesh: bool, output_path: str): + """Construct and save to file an LFRIc cubesphere-like cube. + + Construct and save to file an LFRIc cubesphere-like cube for a given + cubesphere size, *or* a simpler structured (UM-like) cube of equivalent + size. + + NOTE: this function is *NEVER* called from within this actual package. 
+    Instead, it is to be called via benchmarks.remote_data_generation,
+    so that it can use up-to-date facilities, independent of the ASV controlled
+    environment which contains the "Iris commit under test".
+
+    This means:
+
+    * it must be completely self-contained: i.e. it includes all its
+      own imports, and saves results to an output file.
+
+    """
+    from iris import save
+    from iris.tests.stock.mesh import sample_mesh, sample_mesh_cube
+
+    # n_cube * n_cube faces per cubesphere panel.
+    n_face_nodes = n_cube * n_cube
+    n_faces = 6 * n_face_nodes
+
+    # Set n_nodes=n_faces and n_edges=2*n_faces: not exact, but similar to a
+    # 'real' cubesphere.
+    n_nodes = n_faces
+    n_edges = 2 * n_faces
+    if with_mesh:
+        mesh = sample_mesh(
+            n_nodes=n_nodes, n_faces=n_faces, n_edges=n_edges, lazy_values=True
+        )
+        cube = sample_mesh_cube(mesh=mesh, n_z=1)
+    else:
+        cube = sample_mesh_cube(nomesh_faces=n_faces, n_z=1)
+
+    # Strip off the 'extra' aux-coord mapping the mesh, which sample-cube adds
+    # but which we don't want.
+    cube.remove_coord("mesh_face_aux")
+
+    # Save the result to a named file.
+    save(cube, output_path)
+
+
+def make_cube_like_2d_cubesphere(n_cube: int, with_mesh: bool):
+    """Generate an LFRic cubesphere-like cube.
+
+    Generate an LFRic cubesphere-like cube for a given cubesphere size,
+    *or* a simpler structured (UM-like) cube of equivalent size.
+
+    All the cube data, coords and mesh content are LAZY, and produced without
+    allocating large real arrays (to allow peak-memory testing).
+
+    NOTE: the actual cube generation is done in a stable Iris environment via
+    benchmarks.remote_data_generation, so it is all channeled via cached netcdf
+    files in our common testdata directory.
+
+    """
+    identifying_filename = f"cube_like_2d_cubesphere_C{n_cube}_Mesh={with_mesh}.nc"
+    filepath = BENCHMARK_DATA / identifying_filename
+    if not filepath.exists():
+        # Create the required testfile, by running the generation code remotely
+        # in a 'fixed' python environment.
+        run_function_elsewhere(
+            generate_cube_like_2d_cubesphere,
+            n_cube,
+            with_mesh=with_mesh,
+            output_path=str(filepath),
+        )
+
+    # File now *should* definitely exist: content is simply the desired cube.
+    cube = iris_loadcube(str(filepath))
+
+    # Ensure correct laziness.
+    _ = cube.data
+    for coord in cube.coords(mesh_coords=False):
+        assert not coord.has_lazy_points()
+        assert not coord.has_lazy_bounds()
+    if cube.mesh:
+        for coord in cube.mesh.coords():
+            assert coord.has_lazy_points()
+        for conn in cube.mesh.connectivities():
+            assert conn.has_lazy_indices()
+
+    return cube
+
+
+def make_cube_like_umfield(xy_dims):
+    """Create a "UM-like" cube with lazy content, for save performance testing.
+
+    Roughly equivalent to a single current UM cube, to be compared with
+    a "make_cube_like_2d_cubesphere(n_cube=_N_CUBESPHERE_UM_EQUIVALENT)"
+    (see below).
+
+    Note: probably a bit over-simplified, as there is no time coord, but that
+    is probably equally true of our LFRic-style synthetic data.
+
+    Parameters
+    ----------
+    xy_dims : 2-tuple
+        Set the horizontal dimensions = n-lats, n-lons.
+
+    """
+
+    def _external(xy_dims_, save_path_):
+        from dask import array as da
+        import numpy as np
+
+        from iris import save
+        from iris.coords import DimCoord
+        from iris.cube import Cube
+
+        nz, ny, nx = (1,) + xy_dims_
+
+        # Base data : Note this is float32 not float64 like LFRic/XIOS outputs.
+        lazy_data = da.zeros((nz, ny, nx), dtype=np.float32)
+        cube = Cube(lazy_data, long_name="structured_phenom")
+
+        # Add simple dim coords also.
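+        # (A small aside: ``units=1`` below marks the level coordinate as
+        # dimensionless - cf_units treats "1" as the dimensionless unit.)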
+        z_dimco = DimCoord(np.arange(nz), long_name="level", units=1)
+        y_dimco = DimCoord(
+            np.linspace(-90.0, 90.0, ny),
+            standard_name="latitude",
+            units="degrees",
+        )
+        x_dimco = DimCoord(
+            np.linspace(-180.0, 180.0, nx),
+            standard_name="longitude",
+            units="degrees",
+        )
+        for idim, co in enumerate([z_dimco, y_dimco, x_dimco]):
+            cube.add_dim_coord(co, idim)
+
+        save(cube, save_path_)
+
+    save_path = (BENCHMARK_DATA / f"make_cube_like_umfield_{xy_dims}").with_suffix(
+        ".nc"
+    )
+    if not REUSE_DATA or not save_path.is_file():
+        _ = run_function_elsewhere(_external, xy_dims, str(save_path))
+    with load_realised():
+        cube = iris_loadcube(str(save_path))
+
+    return cube
+
+
+def make_cubesphere_testfile(c_size, n_levels=0, n_times=1):
+    """Build a C<c_size> cubesphere testfile in a given directory.
+
+    Build a C<c_size> cubesphere testfile in a given directory, with standard
+    naming. If ``n_levels`` > 0, a 3D file with the specified number of levels
+    is produced. Return the file path.
+
+    TODO: is create_file__xios... still appropriate now we can properly save Mesh Cubes?
+
+    """
+    n_faces = 6 * c_size * c_size
+    stem_name = f"mesh_cubesphere_C{c_size}_t{n_times}"
+    kwargs = dict(
+        temp_file_dir=None,
+        dataset_name=stem_name,  # N.B. function adds the ".nc" extension
+        n_times=n_times,
+        n_faces=n_faces,
+    )
+
+    three_d = n_levels > 0
+    if three_d:
+        kwargs["n_levels"] = n_levels
+        kwargs["dataset_name"] += f"_{n_levels}levels"
+        func = create_file__xios_3d_face_half_levels
+    else:
+        func = create_file__xios_2d_face_half_levels
+
+    file_path = func(**kwargs)
+    return file_path
diff --git a/benchmarks/benchmarks/generate_data/um_files.py b/benchmarks/benchmarks/generate_data/um_files.py
new file mode 100644
index 0000000000..40bf83e79c
--- /dev/null
+++ b/benchmarks/benchmarks/generate_data/um_files.py
@@ -0,0 +1,188 @@
+# Copyright Iris contributors
+#
+# This file is part of Iris and is released under the BSD license.
+# See LICENSE in the root of the repository for full licensing details.
+"""Generate FF, PP and NetCDF files based on a minimal synthetic FF file.
+
+NOTE: uses the Mule package, so depends on an environment with Mule installed.
+"""
+
+
+def _create_um_files(
+    len_x: int, len_y: int, len_z: int, len_t: int, compress, save_paths: dict
+) -> None:
+    """Generate an FF object of given shape and compression, save to FF/PP/NetCDF.
+
+    This is run externally
+    (:func:`benchmarks.generate_data.run_function_elsewhere`), so all imports
+    are self-contained and input parameters are simple types.
+    """
+    from copy import deepcopy
+    from datetime import datetime
+    from tempfile import NamedTemporaryFile
+
+    from mule import ArrayDataProvider, Field3, FieldsFile
+    from mule.pp import fields_to_pp_file
+    import numpy as np
+
+    from iris import load_cube
+    from iris import save as save_cube
+
+    template = {
+        "fixed_length_header": {"dataset_type": 3, "grid_staggering": 3},
+        "integer_constants": {
+            "num_p_levels": len_z,
+            "num_cols": len_x,
+            "num_rows": len_y,
+        },
+        "real_constants": {},
+        "level_dependent_constants": {"dims": (len_z + 1, None)},
+    }
+    new_ff = FieldsFile.from_template(deepcopy(template))
+
+    data_array = np.arange(len_x * len_y).reshape(len_x, len_y)
+    array_provider = ArrayDataProvider(data_array)
+
+    def add_field(level_: int, time_step_: int) -> None:
+        """Add a minimal field to the new :class:`~mule.FieldsFile`.
+
+        Includes the minimum information to allow Mule saving and Iris
+        loading, as well as incrementing values for the vertical levels and
+        time steps, to allow generation of z and t dimensions.
+        """
+        new_field = Field3.empty()
+        # To correspond to the header-release 3 class used.
+        new_field.lbrel = 3
+        # Mule uses the first element of the lookup to test for
+        # unpopulated fields (and skips them), so the first element should
+        # be set to something. The year will do.
+        new_field.raw[1] = datetime.now().year
+
+        # Horizontal.
+        new_field.lbcode = 1
+        new_field.lbnpt = len_x
+        new_field.lbrow = len_y
+        new_field.bdx = new_ff.real_constants.col_spacing
+        new_field.bdy = new_ff.real_constants.row_spacing
+        new_field.bzx = new_ff.real_constants.start_lon - 0.5 * new_field.bdx
+        new_field.bzy = new_ff.real_constants.start_lat - 0.5 * new_field.bdy
+
+        # Hemisphere.
+        new_field.lbhem = 32
+        # Processing.
+        new_field.lbproc = 0
+
+        # Vertical.
+        # Hybrid height values by simulating sequences similar to those in a
+        # theta file.
+        new_field.lbvc = 65
+        if level_ == 0:
+            new_field.lblev = 9999
+        else:
+            new_field.lblev = level_
+
+        level_1 = level_ + 1
+        six_rec = 20 / 3
+        three_rec = six_rec / 2
+
+        new_field.blev = level_1**2 * six_rec - six_rec
+        new_field.brsvd1 = level_1**2 * six_rec + (six_rec * level_1) - three_rec
+
+        brsvd2_simulated = np.linspace(0.995, 0, len_z)
+        shift = min(len_z, 2)
+        bhrlev_simulated = np.concatenate([np.ones(shift), brsvd2_simulated[:-shift]])
+        new_field.brsvd2 = brsvd2_simulated[level_]
+        new_field.bhrlev = bhrlev_simulated[level_]
+
+        # Time.
+        new_field.lbtim = 11
+
+        new_field.lbyr = time_step_
+        for attr_name in ["lbmon", "lbdat", "lbhr", "lbmin", "lbsec"]:
+            setattr(new_field, attr_name, 0)
+
+        new_field.lbyrd = time_step_ + 1
+        for attr_name in ["lbmond", "lbdatd", "lbhrd", "lbmind", "lbsecd"]:
+            setattr(new_field, attr_name, 0)
+
+        # Data and packing.
+        new_field.lbuser1 = 1
+        new_field.lbpack = int(compress)
+        new_field.bacc = 0
+        new_field.bmdi = -1
+        new_field.lbext = 0
+        new_field.set_data_provider(array_provider)
+
+        new_ff.fields.append(new_field)
+
+    for time_step in range(len_t):
+        for level in range(len_z):
+            add_field(level, time_step + 1)
+
+    ff_path = save_paths.get("FF", None)
+    pp_path = save_paths.get("PP", None)
+    nc_path = save_paths.get("NetCDF", None)
+
+    if ff_path:
+        new_ff.to_file(ff_path)
+    if pp_path:
+        fields_to_pp_file(str(pp_path), new_ff.fields)
+    if nc_path:
+        temp_ff_path = None
+        # Need an Iris Cube from the FF content.
+        if ff_path:
+            # Use the existing file.
+            ff_cube = load_cube(ff_path)
+        else:
+            # Make a temporary file.
+            temp_ff_path = NamedTemporaryFile()
+            new_ff.to_file(temp_ff_path.name)
+            ff_cube = load_cube(temp_ff_path.name)
+
+        save_cube(ff_cube, nc_path, zlib=compress)
+        if temp_ff_path:
+            temp_ff_path.close()
+
+
+FILE_EXTENSIONS = {"FF": "", "PP": ".pp", "NetCDF": ".nc"}
+
+
+def create_um_files(
+    len_x: int,
+    len_y: int,
+    len_z: int,
+    len_t: int,
+    compress: bool,
+    file_types: list,
+) -> dict:
+    """Generate FF-based FF / PP / NetCDF files with specified shape and compression.
+
+    All files representing a given shape are saved in a dedicated directory. A
+    dictionary of the saved paths is returned.
+
+    If the required files exist, they are re-used, unless
+    :const:`benchmarks.REUSE_DATA` is ``False``.
+    """
+    # Self-contained imports to avoid linting confusion with _create_um_files().
+    from . import BENCHMARK_DATA, REUSE_DATA, run_function_elsewhere
+
+    save_name_sections = ["UM", len_x, len_y, len_z, len_t]
+    save_name = "_".join(str(section) for section in save_name_sections)
+    save_dir = BENCHMARK_DATA / save_name
+    if not save_dir.is_dir():
+        save_dir.mkdir(parents=True)
+
+    save_paths = {}
+    files_exist = True
+    for file_type in file_types:
+        file_ext = FILE_EXTENSIONS[file_type]
+        save_path = (save_dir / f"{compress}").with_suffix(file_ext)
+        files_exist = files_exist and save_path.is_file()
+        save_paths[file_type] = str(save_path)
+
+    if not REUSE_DATA or not files_exist:
+        _ = run_function_elsewhere(
+            _create_um_files, len_x, len_y, len_z, len_t, compress, save_paths
+        )
+
+    return save_paths
diff --git a/benchmarks/benchmarks/import_iris.py b/benchmarks/benchmarks/import_iris.py
new file mode 100644
index 0000000000..ff5f19e421
--- /dev/null
+++ b/benchmarks/benchmarks/import_iris.py
@@ -0,0 +1,278 @@
+# Copyright Iris contributors
+#
+# This file is part of Iris and is released under the BSD license.
+# See LICENSE in the root of the repository for full licensing details.
+
+"""Import iris benchmarking."""
+
+from importlib import import_module, reload
+
+################
+# Prepare info for reset_colormaps:
+
+# Import and capture colormaps.
+from matplotlib import colormaps  # isort:skip
+
+_COLORMAPS_ORIG = set(colormaps)
+
+# Import iris.palette, which modifies colormaps.
+import iris.palette
+
+# Derive which colormaps have been added by iris.palette.
+_COLORMAPS_MOD = set(colormaps)
+COLORMAPS_EXTRA = _COLORMAPS_MOD - _COLORMAPS_ORIG
+
+# Touch iris.palette to prevent linters complaining.
+_ = iris.palette
+
+################
+
+
+class Iris:
+    @staticmethod
+    def _import(module_name, reset_colormaps=False):
+        """Have experimented with adding sleep() commands into the imported modules.
+
+        The results reveal:
+
+        ASV avoids invoking `import x` if nothing gets called in the
+        benchmark (some imports were timed, but only those where calls
+        happened during import).
+
+        Using reload() is not identical to importing, but does produce
+        results that are very close to expected import times, so this is fine
+        for monitoring for regressions.
+        It is also ideal for accurate repetitions, without the need to mess
+        with the ASV `number` attribute etc, since cached imports are not used
+        and the repetitions are therefore no faster than the first run.
+        """
+        mod = import_module(module_name)
+
+        if reset_colormaps:
+            # Needed because reload() will attempt to register new colormaps a
+            # second time, which errors by default.
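+            # (matplotlib's ``ColormapRegistry.register`` raises ``ValueError``
+            # for an already-registered name unless ``force=True`` is passed,
+            # hence unregistering first.)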
+            for cm_name in COLORMAPS_EXTRA:
+                colormaps.unregister(cm_name)
+
+        reload(mod)
+
+    def time_iris(self):
+        self._import("iris")
+
+    def time__concatenate(self):
+        self._import("iris._concatenate")
+
+    def time__constraints(self):
+        self._import("iris._constraints")
+
+    def time__data_manager(self):
+        self._import("iris._data_manager")
+
+    def time__deprecation(self):
+        self._import("iris._deprecation")
+
+    def time__lazy_data(self):
+        self._import("iris._lazy_data")
+
+    def time__merge(self):
+        self._import("iris._merge")
+
+    def time__representation(self):
+        self._import("iris._representation")
+
+    def time_analysis(self):
+        self._import("iris.analysis")
+
+    def time_analysis__area_weighted(self):
+        self._import("iris.analysis._area_weighted")
+
+    def time_analysis__grid_angles(self):
+        self._import("iris.analysis._grid_angles")
+
+    def time_analysis__interpolation(self):
+        self._import("iris.analysis._interpolation")
+
+    def time_analysis__regrid(self):
+        self._import("iris.analysis._regrid")
+
+    def time_analysis__scipy_interpolate(self):
+        self._import("iris.analysis._scipy_interpolate")
+
+    def time_analysis_calculus(self):
+        self._import("iris.analysis.calculus")
+
+    def time_analysis_cartography(self):
+        self._import("iris.analysis.cartography")
+
+    def time_analysis_geometry(self):
+        self._import("iris.analysis.geometry")
+
+    def time_analysis_maths(self):
+        self._import("iris.analysis.maths")
+
+    def time_analysis_stats(self):
+        self._import("iris.analysis.stats")
+
+    def time_analysis_trajectory(self):
+        self._import("iris.analysis.trajectory")
+
+    def time_aux_factory(self):
+        self._import("iris.aux_factory")
+
+    def time_common(self):
+        self._import("iris.common")
+
+    def time_common_lenient(self):
+        self._import("iris.common.lenient")
+
+    def time_common_metadata(self):
+        self._import("iris.common.metadata")
+
+    def time_common_mixin(self):
+        self._import("iris.common.mixin")
+
+    def time_common_resolve(self):
+        self._import("iris.common.resolve")
+
+    def time_config(self):
+        self._import("iris.config")
+
+    def time_coord_categorisation(self):
+        self._import("iris.coord_categorisation")
+
+    def time_coord_systems(self):
+        self._import("iris.coord_systems")
+
+    def time_coords(self):
+        self._import("iris.coords")
+
+    def time_cube(self):
+        self._import("iris.cube")
+
+    def time_exceptions(self):
+        self._import("iris.exceptions")
+
+    def time_experimental(self):
+        self._import("iris.experimental")
+
+    def time_fileformats(self):
+        self._import("iris.fileformats")
+
+    def time_fileformats__ff(self):
+        self._import("iris.fileformats._ff")
+
+    def time_fileformats__ff_cross_references(self):
+        self._import("iris.fileformats._ff_cross_references")
+
+    def time_fileformats__pp_lbproc_pairs(self):
+        self._import("iris.fileformats._pp_lbproc_pairs")
+
+    def time_fileformats_structured_array_identification(self):
+        self._import("iris.fileformats._structured_array_identification")
+
+    def time_fileformats_abf(self):
+        self._import("iris.fileformats.abf")
+
+    def time_fileformats_cf(self):
+        self._import("iris.fileformats.cf")
+
+    def time_fileformats_dot(self):
+        self._import("iris.fileformats.dot")
+
+    def time_fileformats_name(self):
+        self._import("iris.fileformats.name")
+
+    def time_fileformats_name_loaders(self):
+        self._import("iris.fileformats.name_loaders")
+
+    def time_fileformats_netcdf(self):
+        self._import("iris.fileformats.netcdf")
+
+    def time_fileformats_nimrod(self):
+        self._import("iris.fileformats.nimrod")
+
+    def time_fileformats_nimrod_load_rules(self):
+        self._import("iris.fileformats.nimrod_load_rules")
+
+    def time_fileformats_pp(self):
+        self._import("iris.fileformats.pp")
+
+    def time_fileformats_pp_load_rules(self):
+        self._import("iris.fileformats.pp_load_rules")
+
+    def time_fileformats_pp_save_rules(self):
+        self._import("iris.fileformats.pp_save_rules")
+
+    def time_fileformats_rules(self):
+        self._import("iris.fileformats.rules")
+
+    def time_fileformats_um(self):
+        self._import("iris.fileformats.um")
+
+    def time_fileformats_um__fast_load(self):
+        self._import("iris.fileformats.um._fast_load")
+
+    def time_fileformats_um__fast_load_structured_fields(self):
+        self._import("iris.fileformats.um._fast_load_structured_fields")
+
+    def time_fileformats_um__ff_replacement(self):
+        self._import("iris.fileformats.um._ff_replacement")
+
+    def time_fileformats_um__optimal_array_structuring(self):
+        self._import("iris.fileformats.um._optimal_array_structuring")
+
+    def time_fileformats_um_cf_map(self):
+        self._import("iris.fileformats.um_cf_map")
+
+    def time_io(self):
+        self._import("iris.io")
+
+    def time_io_format_picker(self):
+        self._import("iris.io.format_picker")
+
+    def time_iterate(self):
+        self._import("iris.iterate")
+
+    def time_palette(self):
+        self._import("iris.palette", reset_colormaps=True)
+
+    def time_plot(self):
+        self._import("iris.plot")
+
+    def time_quickplot(self):
+        self._import("iris.quickplot")
+
+    def time_std_names(self):
+        self._import("iris.std_names")
+
+    def time_symbols(self):
+        self._import("iris.symbols")
+
+    def time_tests(self):
+        self._import("iris.tests")
+
+    def time_time(self):
+        self._import("iris.time")
+
+    def time_util(self):
+        self._import("iris.util")
+
+    # third-party imports
+
+    def time_third_party_cartopy(self):
+        self._import("cartopy")
+
+    def time_third_party_cf_units(self):
+        self._import("cf_units")
+
+    def time_third_party_cftime(self):
+        self._import("cftime")
+
+    def time_third_party_matplotlib(self):
+        self._import("matplotlib")
+
+    def time_third_party_numpy(self):
+        self._import("numpy")
+
+    def time_third_party_scipy(self):
+        self._import("scipy")
diff --git a/benchmarks/benchmarks/iterate.py b/benchmarks/benchmarks/iterate.py
new file mode 100644
index 0000000000..664bcf8ba2
--- /dev/null
+++ b/benchmarks/benchmarks/iterate.py
@@ -0,0 +1,26 @@
+# Copyright Iris contributors
+#
+# This file is part of Iris and is released under the BSD license.
+# See LICENSE in the root of the repository for full licensing details.
+"""Iterate benchmark tests."""
+
+import numpy as np
+
+from iris import coords, cube, iterate
+
+
+class IZip:
+    def setup(self):
+        data_2d = np.zeros((1000,) * 2)
+        data_1d = data_2d[0]
+        local_cube = cube.Cube(data_2d)
+        coord_a = coords.AuxCoord(points=data_1d, long_name="a")
+        coord_b = coords.AuxCoord(points=data_1d, long_name="b")
+        # Use a concrete tuple, not a generator - a generator would be
+        # exhausted after the first benchmark iteration.
+        self.coord_names = tuple(coord.long_name for coord in (coord_a, coord_b))
+
+        local_cube.add_aux_coord(coord_a, 0)
+        local_cube.add_aux_coord(coord_b, 1)
+        self.cube = local_cube
+
+    def time_izip(self):
+        iterate.izip(self.cube, coords=self.coord_names)
diff --git a/benchmarks/benchmarks/load/__init__.py b/benchmarks/benchmarks/load/__init__.py
new file mode 100644
index 0000000000..a4dfb40d19
--- /dev/null
+++ b/benchmarks/benchmarks/load/__init__.py
@@ -0,0 +1,169 @@
+# Copyright Iris contributors
+#
+# This file is part of Iris and is released under the BSD license.
+# See LICENSE in the root of the repository for full licensing details.
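+# For orientation: ASV collects the classes below by naming convention -
+# ``time_*`` methods are timed benchmarks, ``track_*`` methods return a
+# tracked value, ``params``/``param_names`` define the parameter matrix, and
+# ``setup_cache`` runs once, with its return value passed as the first
+# argument to ``setup`` and to each benchmark. A minimal sketch, using
+# hypothetical names that are not part of this suite:
+#
+#     class Example:
+#         params = [10, 100]
+#         param_names = ["size"]
+#
+#         def setup(self, size):
+#             self.data = list(range(size))
+#
+#         def time_sum(self, size):
+#             sum(self.data)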
+"""File loading benchmark tests.""" + +from iris import AttributeConstraint, Constraint, load, load_cube +from iris.cube import Cube +from iris.fileformats.um import structured_um_loading + +from ..generate_data import BENCHMARK_DATA, REUSE_DATA, run_function_elsewhere +from ..generate_data.um_files import create_um_files + + +class LoadAndRealise: + # For data generation + timeout = 600.0 + params = ( + [(50, 50, 2), (1280, 960, 5), (2, 2, 1000)], + [False, True], + ["FF", "PP", "NetCDF"], + ) + param_names = ["xyz", "compressed", "file_format"] + + def setup_cache(self) -> dict: + file_type_args = self.params[2] + file_path_dict: dict[tuple[int, int, int], dict[bool, dict[str, str]]] = {} + for xyz in self.params[0]: + file_path_dict[xyz] = {} + x, y, z = xyz + for compress in self.params[1]: + file_path_dict[xyz][compress] = create_um_files( + x, y, z, 1, compress, file_type_args + ) + return file_path_dict + + def setup( + self, + file_path_dict: dict, + xyz: tuple, + compress: bool, + file_format: str, + ) -> None: + self.file_path = file_path_dict[xyz][compress][file_format] + self.cube = self.load() + + def load(self) -> Cube: + return load_cube(self.file_path) + + def time_load(self, _, __, ___, ____) -> None: + _ = self.load() + + def time_realise(self, _, __, ___, ____) -> None: + # Don't touch cube.data - permanent realisation plays badly with ASV's + # re-run strategy. + assert self.cube.has_lazy_data() + self.cube.core_data().compute() + + +class STASHConstraint: + # xyz sizes mimic LoadAndRealise to maximise file reuse. + params = ([(2, 2, 2), (1280, 960, 5), (2, 2, 1000)], ["FF", "PP"]) + param_names = ["xyz", "file_format"] + + def setup_cache(self) -> dict: + file_type_args = self.params[1] + file_path_dict = {} + for xyz in self.params[0]: + x, y, z = xyz + file_path_dict[xyz] = create_um_files(x, y, z, 1, False, file_type_args) + return file_path_dict + + def setup(self, file_path_dict: dict, xyz: tuple, file_format: str) -> None: + self.file_path = file_path_dict[xyz][file_format] + + def time_stash_constraint(self, _, __, ___) -> None: + _ = load_cube(self.file_path, AttributeConstraint(STASH="m??s??i901")) + + +class TimeConstraint: + params = ([3, 20], ["FF", "PP", "NetCDF"]) + param_names = ["time_dim_len", "file_format"] + + def setup_cache(self) -> dict: + file_type_args = self.params[1] + file_path_dict = {} + for time_dim_len in self.params[0]: + file_path_dict[time_dim_len] = create_um_files( + 20, 20, 5, time_dim_len, False, file_type_args + ) + return file_path_dict + + def setup(self, file_path_dict: dict, time_dim_len: int, file_format: str) -> None: + self.file_path = file_path_dict[time_dim_len][file_format] + self.time_constr = Constraint(time=lambda cell: cell.point.year < 3) + + def time_time_constraint(self, _, __, ___) -> None: + _ = load_cube(self.file_path, self.time_constr) + + +class ManyVars: + FILE_PATH = BENCHMARK_DATA / "many_var_file.nc" + + @staticmethod + def _create_file(save_path: str) -> None: + """Run externally - everything must be self-contained.""" + import numpy as np + + from iris import save + from iris.coords import AuxCoord + from iris.cube import Cube + + data_len = 8 + data = np.arange(data_len) + cube = Cube(data, units="unknown") + extra_vars = 80 + names = ["coord_" + str(i) for i in range(extra_vars)] + for name in names: + coord = AuxCoord(data, long_name=name, units="unknown") + cube.add_aux_coord(coord, 0) + save(cube, save_path) + + def setup_cache(self) -> None: + if not REUSE_DATA or not self.FILE_PATH.is_file(): + 
# See :mod:`benchmarks.generate_data` docstring for full explanation. + _ = run_function_elsewhere( + self._create_file, + str(self.FILE_PATH), + ) + + def time_many_var_load(self) -> None: + _ = load(str(self.FILE_PATH)) + + +class StructuredFF: + """Test structured loading of a large-ish fieldsfile. + + Structured load of the larger size should show benefit over standard load, + avoiding the cost of merging. + """ + + params = ([(2, 2, 2), (1280, 960, 5), (2, 2, 1000)], [False, True]) + param_names = ["xyz", "structured_loading"] + + def setup_cache(self) -> dict: + file_path_dict = {} + for xyz in self.params[0]: + x, y, z = xyz + file_path_dict[xyz] = create_um_files(x, y, z, 1, False, ["FF"]) + return file_path_dict + + def setup(self, file_path_dict, xyz, structured_load): + self.file_path = file_path_dict[xyz]["FF"] + self.structured_load = structured_load + + def load(self): + """Load the whole file (in fact there is only 1 cube).""" + + def _load(): + _ = load(self.file_path) + + if self.structured_load: + with structured_um_loading(): + _load() + else: + _load() + + def time_structured_load(self, _, __, ___): + self.load() diff --git a/benchmarks/benchmarks/load/ugrid.py b/benchmarks/benchmarks/load/ugrid.py new file mode 100644 index 0000000000..5ad0086ef3 --- /dev/null +++ b/benchmarks/benchmarks/load/ugrid.py @@ -0,0 +1,115 @@ +# Copyright Iris contributors +# +# This file is part of Iris and is released under the BSD license. +# See LICENSE in the root of the repository for full licensing details. +"""Mesh data loading benchmark tests.""" + +from iris import load_cube as iris_load_cube +from iris.mesh import load_mesh as iris_load_mesh + +from ..generate_data.stock import create_file__xios_2d_face_half_levels + + +def synthetic_data(**kwargs): + # Ensure all uses of the synthetic data function use the common directory. + # File location is controlled by :mod:`generate_data`, hence temp_file_dir=None. + return create_file__xios_2d_face_half_levels(temp_file_dir=None, **kwargs) + + +def load_cube(*args, **kwargs): + return iris_load_cube(*args, **kwargs) + + +def load_mesh(*args, **kwargs): + return iris_load_mesh(*args, **kwargs) + + +class BasicLoading: + params = [1, int(2e5)] + param_names = ["number of faces"] + + def setup_common(self, **kwargs): + self.data_path = synthetic_data(**kwargs) + + def setup(self, *args): + self.setup_common(dataset_name="Loading", n_faces=args[0]) + + def time_load_file(self, *args): + _ = load_cube(str(self.data_path)) + + def time_load_mesh(self, *args): + _ = load_mesh(str(self.data_path)) + + +class BasicLoadingTime(BasicLoading): + """Same as BasicLoading, but scaling over a time series - an unlimited dimension.""" + + # NOTE iris#4834 - careful how big the time dimension is (time dimension + # is UNLIMITED). + + param_names = ["number of time steps"] + + def setup(self, *args): + self.setup_common(dataset_name="Loading", n_faces=1, n_times=args[0]) + + +class DataRealisation: + # Prevent repeat runs between setup() runs - data won't be lazy after 1st. + number = 1 + # Compensate for reduced certainty by increasing number of repeats. + repeat = (10, 10, 10.0) + # Prevent ASV running its warmup, which ignores `number` and would + # therefore get a false idea of typical run time since the data would stop + # being lazy. 
+ warmup_time = 0.0 + timeout = 300.0 + + params = [int(1e4), int(2e5)] + param_names = ["number of faces"] + + def setup_common(self, **kwargs): + data_path = synthetic_data(**kwargs) + self.cube = load_cube(str(data_path)) + + def setup(self, *args): + self.setup_common(dataset_name="Realisation", n_faces=args[0]) + + def time_realise_data(self, *args): + assert self.cube.has_lazy_data() + _ = self.cube.data[0] + + +class DataRealisationTime(DataRealisation): + """Same as DataRealisation, but scaling over a time series - an unlimited dimension.""" + + param_names = ["number of time steps"] + + def setup(self, *args): + self.setup_common(dataset_name="Realisation", n_faces=1, n_times=args[0]) + + +class Callback: + params = [1, int(2e5)] + param_names = ["number of faces"] + + def setup_common(self, **kwargs): + def callback(cube, field, filename): + return cube[::2] + + self.data_path = synthetic_data(**kwargs) + self.callback = callback + + def setup(self, *args): + self.setup_common(dataset_name="Loading", n_faces=args[0]) + + def time_load_file_callback(self, *args): + _ = load_cube(str(self.data_path), callback=self.callback) + + +class CallbackTime(Callback): + """Same as Callback, but scaling over a time series - an unlimited dimension.""" + + param_names = ["number of time steps"] + + def setup(self, *args): + self.setup_common(dataset_name="Loading", n_faces=1, n_times=args[0]) diff --git a/benchmarks/benchmarks/merge_concat.py b/benchmarks/benchmarks/merge_concat.py new file mode 100644 index 0000000000..2d3738683a --- /dev/null +++ b/benchmarks/benchmarks/merge_concat.py @@ -0,0 +1,72 @@ +# Copyright Iris contributors +# +# This file is part of Iris and is released under the BSD license. +# See LICENSE in the root of the repository for full licensing details. +"""Benchmarks relating to :meth:`iris.cube.CubeList.merge` and ``concatenate``.""" + +import warnings + +import numpy as np + +from iris.cube import CubeList +from iris.warnings import IrisVagueMetadataWarning + +from .generate_data.stock import realistic_4d_w_everything + + +class Merge: + # TODO: Improve coverage. + + cube_list: CubeList + + def setup(self): + source_cube = realistic_4d_w_everything() + + # Merge does not yet fully support cell measures and ancillary variables. + for cm in source_cube.cell_measures(): + source_cube.remove_cell_measure(cm) + for av in source_cube.ancillary_variables(): + source_cube.remove_ancillary_variable(av) + + second_cube = source_cube.copy() + scalar_coord = second_cube.coords(dimensions=[])[0] + scalar_coord.points = scalar_coord.points + 1 + self.cube_list = CubeList([source_cube, second_cube]) + + def time_merge(self): + _ = self.cube_list.merge_cube() + + def tracemalloc_merge(self): + _ = self.cube_list.merge_cube() + + tracemalloc_merge.number = 3 # type: ignore[attr-defined] + + +class Concatenate: + # TODO: Improve coverage. 
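+    # For orientation (hypothetical shapes): merge, above, stacks cubes over a
+    # NEW dimension built from differing scalar coords, e.g.
+    # 2 x (70, 100, 100) -> (2, 70, 100, 100), whereas concatenate joins cubes
+    # along an EXISTING dimension coordinate, e.g.
+    # (36, 100, 100) + (36, 100, 100) -> (72, 100, 100).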
+
+    cube_list: CubeList
+
+    params = [[False, True]]
+    param_names = ["Lazy operations"]
+
+    def setup(self, lazy_run: bool):
+        warnings.filterwarnings("ignore", message="Ignoring a datum")
+        warnings.filterwarnings("ignore", category=IrisVagueMetadataWarning)
+        source_cube = realistic_4d_w_everything(lazy=lazy_run)
+        self.cube_list = CubeList([source_cube])
+        for _ in range(24):
+            next_cube = self.cube_list[-1].copy()
+            first_dim_coord = next_cube.coord(dimensions=0, dim_coords=True)
+            first_dim_coord.points = (
+                first_dim_coord.points + np.ptp(first_dim_coord.points) + 1
+            )
+            self.cube_list.append(next_cube)
+
+    def time_concatenate(self, _):
+        _ = self.cube_list.concatenate_cube()
+
+    def tracemalloc_concatenate(self, _):
+        _ = self.cube_list.concatenate_cube()
+
+    tracemalloc_concatenate.number = 3  # type: ignore[attr-defined]
diff --git a/benchmarks/benchmarks/mesh/__init__.py b/benchmarks/benchmarks/mesh/__init__.py
new file mode 100644
index 0000000000..9cc76ce0aa
--- /dev/null
+++ b/benchmarks/benchmarks/mesh/__init__.py
@@ -0,0 +1,5 @@
+# Copyright Iris contributors
+#
+# This file is part of Iris and is released under the BSD license.
+# See LICENSE in the root of the repository for full licensing details.
+"""Benchmark tests for the iris.mesh module."""
diff --git a/benchmarks/benchmarks/mesh/utils/__init__.py b/benchmarks/benchmarks/mesh/utils/__init__.py
new file mode 100644
index 0000000000..e20973c0a7
--- /dev/null
+++ b/benchmarks/benchmarks/mesh/utils/__init__.py
@@ -0,0 +1,5 @@
+# Copyright Iris contributors
+#
+# This file is part of Iris and is released under the BSD license.
+# See LICENSE in the root of the repository for full licensing details.
+"""Benchmark tests for the iris.mesh.utils module."""
diff --git a/benchmarks/benchmarks/mesh/utils/regions_combine.py b/benchmarks/benchmarks/mesh/utils/regions_combine.py
new file mode 100644
index 0000000000..a61deea56d
--- /dev/null
+++ b/benchmarks/benchmarks/mesh/utils/regions_combine.py
@@ -0,0 +1,227 @@
+# Copyright Iris contributors
+#
+# This file is part of Iris and is released under the BSD license.
+# See LICENSE in the root of the repository for full licensing details.
+"""Benchmarks stages of operation.
+
+Benchmarks stages of operation of the function
+:func:`iris.mesh.utils.recombine_submeshes`.
+
+"""
+
+import os
+
+import dask.array as da
+import numpy as np
+
+from iris import load, load_cube, save
+from iris.mesh.utils import recombine_submeshes
+
+from ...generate_data.ugrid import make_cube_like_2d_cubesphere
+
+
+class MixinCombineRegions:
+    # Characterise time taken + memory-allocated, for various stages of combine
+    # operations on cubesphere-like test data.
+    params = [50, 500]
+    param_names = ["cubesphere-N"]
+
+    def _parametrised_cache_filename(self, n_cubesphere, content_name):
+        return f"cube_C{n_cubesphere}_{content_name}.nc"
+
+    def _make_region_cubes(self, full_mesh_cube):
+        """Make a fixed number of region cubes from a full meshcube."""
+        # Divide the cube into regions.
+        n_faces = full_mesh_cube.shape[-1]
+        # Start with a simple list of face indices
+        # first extend to multiple of 5
+        n_faces_5s = 5 * ((n_faces + 1) // 5)
+        i_faces = np.arange(n_faces_5s, dtype=int)
+        # reshape (5N,) to (N, 5)
+        i_faces = i_faces.reshape((n_faces_5s // 5, 5))
+        # reorder [2, 3, 4, 0, 1] within each block of 5
+        i_faces = np.concatenate([i_faces[:, 2:], i_faces[:, :2]], axis=1)
+        # flatten to get [2 3 4 0 1 (-) 7 8 9 5 6 (-) 12 13 14 10 11 ...]
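+        # Worked example, assuming a hypothetical n_faces=15: reshape gives
+        # [[0 1 2 3 4], [5 6 7 8 9], [10 11 12 13 14]], and the reorder then
+        # yields [[2 3 4 0 1], [7 8 9 5 6], [12 13 14 10 11]].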
+        i_faces = i_faces.flatten()
+        # reduce back to original length, wrap any overflows into valid range
+        i_faces = i_faces[:n_faces] % n_faces
+
+        # Divide into regions -- always slightly uneven, since 7 doesn't
+        # divide n_faces exactly
+        n_regions = 7
+        n_facesperregion = n_faces // n_regions
+        i_face_regions = (i_faces // n_facesperregion) % n_regions
+        region_inds = [
+            np.where(i_face_regions == i_region)[0] for i_region in range(n_regions)
+        ]
+        # NOTE: this produces 7 regions, with near-adjacent value ranges but
+        # with some points "moved" to an adjacent region.
+        # Also, region-0 is bigger (because of not dividing by 7).
+
+        # Finally, make region cubes with these indices.
+        region_cubes = [full_mesh_cube[..., inds] for inds in region_inds]
+        return region_cubes
+
+    def setup_cache(self):
+        """Cache all the necessary source data on disk."""
+        # Control dask, to minimise memory usage + allow largest data.
+        self.fix_dask_settings()
+
+        for n_cubesphere in self.params:
+            # Do for each parameter, since "setup_cache" is NOT parametrised
+            mesh_cube = make_cube_like_2d_cubesphere(
+                n_cube=n_cubesphere, with_mesh=True
+            )
+            # Save to files which include the parameter in the names.
+            save(
+                mesh_cube,
+                self._parametrised_cache_filename(n_cubesphere, "meshcube"),
+            )
+            region_cubes = self._make_region_cubes(mesh_cube)
+            save(
+                region_cubes,
+                self._parametrised_cache_filename(n_cubesphere, "regioncubes"),
+            )
+
+    def setup(self, n_cubesphere, imaginary_data=True, create_result_cube=True):
+        """Combine tests "standard" setup operation.
+
+        Load the source cubes (full-mesh + region) from disk.
+        These are specific to the cubesize parameter.
+        The data is cached on disk rather than calculated, to avoid any
+        pre-loading of the process memory allocation.
+
+        If 'imaginary_data' is set (default), the region cubes data is replaced
+        with lazy data in the form of a da.zeros(). Otherwise, the region data
+        is lazy data from the files.
+
+        If 'create_result_cube' is set, create "self.recombined_cube"
+        containing the (still lazy) result.
+
+        NOTE: various test classes override + extend this.
+
+        """
+        # Load source cubes (full-mesh and regions)
+        self.full_mesh_cube = load_cube(
+            self._parametrised_cache_filename(n_cubesphere, "meshcube")
+        )
+        self.region_cubes = load(
+            self._parametrised_cache_filename(n_cubesphere, "regioncubes")
+        )
+
+        # Remove all var-names from loaded cubes, which can otherwise cause
+        # problems. Also implement 'imaginary' data.
+        for cube in self.region_cubes + [self.full_mesh_cube]:
+            cube.var_name = None
+            for coord in cube.coords():
+                coord.var_name = None
+            if imaginary_data:
+                # Replace cube data (lazy file data) with 'imaginary' data.
+                # This has the same lazy-array attributes, but is allocated by
+                # creating chunks on demand instead of loading from file.
+                data = cube.lazy_data()
+                data = da.zeros(data.shape, dtype=data.dtype, chunks=data.chunksize)
+                cube.data = data
+
+        if create_result_cube:
+            self.recombined_cube = self.recombine()
+
+        # Fix dask usage mode for all the subsequent performance tests.
+        self.fix_dask_settings()
+
+    def fix_dask_settings(self):
+        """Fix "standard" dask behaviour for time+space testing.
+
+        Currently this is single-threaded mode, with known chunksize,
+        which is optimised for space saving so we can test largest data.
+
+        """
+        import dask.config as dcfg
+
+        # Use single-threaded, to avoid process-switching costs and minimise memory usage.
+        # N.B. generally may be slower, but uses less memory.
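+        # (``dcfg.set`` called outside a context manager applies for the rest
+        # of the process - intended here, as the settings must persist into
+        # the timed phases.)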
+        dcfg.set(scheduler="single-threaded")
+        # Configure iris._lazy_data.as_lazy_data to aim for 128 MiB chunks
+        dcfg.set({"array.chunk-size": "128Mib"})
+
+    def recombine(self):
+        # A handy general shorthand for the main "combine" operation.
+        result = recombine_submeshes(
+            self.full_mesh_cube,
+            self.region_cubes,
+            index_coord_name="i_mesh_face",
+        )
+        return result
+
+
+class CombineRegionsCreateCube(MixinCombineRegions):
+    """Time+memory costs of creating a combined-regions cube.
+
+    The result is lazy, and we don't do the actual calculation.
+
+    """
+
+    def setup(self, n_cubesphere):
+        # In this case only, do *not* create the result cube.
+        # That is the operation we want to test.
+        super().setup(n_cubesphere, create_result_cube=False)
+
+    def time_create_combined_cube(self, n_cubesphere):
+        self.recombine()
+
+    def tracemalloc_create_combined_cube(self, n_cubesphere):
+        self.recombine()
+
+
+class CombineRegionsComputeRealData(MixinCombineRegions):
+    """Time+memory costs of computing combined-regions data."""
+
+    def time_compute_data(self, n_cubesphere):
+        _ = self.recombined_cube.data
+
+    def tracemalloc_compute_data(self, n_cubesphere):
+        _ = self.recombined_cube.data
+
+
+class CombineRegionsSaveData(MixinCombineRegions):
+    """Test saving *only*.
+
+    Test saving *only*, having replaced the input cube data with 'imaginary'
+    array data, so that input data is not loaded from disk during the save
+    operation.
+
+    """
+
+    def time_save(self, n_cubesphere):
+        # Save to disk, which must compute data + stream it to file.
+        save(self.recombined_cube, "tmp.nc")
+
+    def tracemalloc_save(self, n_cubesphere):
+        save(self.recombined_cube, "tmp.nc")
+
+    def track_filesize_saved(self, n_cubesphere):
+        save(self.recombined_cube, "tmp.nc")
+        return os.path.getsize("tmp.nc") * 1.0e-6
+
+
+CombineRegionsSaveData.track_filesize_saved.unit = "Mb"  # type: ignore[attr-defined]
+
+
+class CombineRegionsFileStreamedCalc(MixinCombineRegions):
+    """Test the whole cost of file-to-file streaming.
+
+    Uses the combined cube which is based on lazy data loading from the region
+    cubes on disk.
+    """
+
+    def setup(self, n_cubesphere):
+        # In this case only, do *not* replace the loaded regions data with
+        # 'imaginary' data, as we want to test file-to-file calculation+save.
+        super().setup(n_cubesphere, imaginary_data=False)
+
+    def time_stream_file2file(self, n_cubesphere):
+        # Save to disk, which must compute data + stream it to file.
+        save(self.recombined_cube, "tmp.nc")
+
+    def tracemalloc_stream_file2file(self, n_cubesphere):
+        save(self.recombined_cube, "tmp.nc")
diff --git a/benchmarks/benchmarks/plot.py b/benchmarks/benchmarks/plot.py
new file mode 100644
index 0000000000..e8fbb5372d
--- /dev/null
+++ b/benchmarks/benchmarks/plot.py
@@ -0,0 +1,34 @@
+# Copyright Iris contributors
+#
+# This file is part of Iris and is released under the BSD license.
+# See LICENSE in the root of the repository for full licensing details.
+"""Plot benchmark tests."""
+
+import matplotlib as mpl
+import numpy as np
+
+from iris import coords, cube, plot
+
+mpl.use("agg")
+
+
+class AuxSort:
+    def setup(self):
+        # Manufacture data from which contours can be derived.
+        # Should generate 10 distinct contours, regardless of dim size.
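+        # Worked through for dim_size=200: repeat_number = 20 and
+        # repeat_range = range(2000), so np.repeat yields 40_000 values,
+        # reshaped to a (200, 200) array of slowly-stepping values.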
+        dim_size = 200
+        repeat_number = int(dim_size / 10)
+        repeat_range = range(int((dim_size**2) / repeat_number))
+        data = np.repeat(repeat_range, repeat_number)
+        data = data.reshape((dim_size,) * 2)
+
+        # These benchmarks are from a user perspective, so we set up a
+        # user-level case that will prompt the calling of aux_coords.sort in
+        # plot.py.
+        dim_coord = coords.DimCoord(np.arange(dim_size))
+        local_cube = cube.Cube(data)
+        local_cube.add_aux_coord(dim_coord, 0)
+        self.cube = local_cube
+
+    def time_aux_sort(self):
+        # Contour plot arbitrarily picked. Known to prompt aux_coords.sort.
+        plot.contour(self.cube)
diff --git a/benchmarks/benchmarks/regridding.py b/benchmarks/benchmarks/regridding.py
new file mode 100644
index 0000000000..e227da0ec6
--- /dev/null
+++ b/benchmarks/benchmarks/regridding.py
@@ -0,0 +1,119 @@
+# Copyright Iris contributors
+#
+# This file is part of Iris and is released under the BSD license.
+# See LICENSE in the root of the repository for full licensing details.
+"""Regridding benchmark test."""
+
+# import iris tests first so that some things can be initialised before
+# importing anything else
+from iris import tests  # isort:skip
+
+import numpy as np
+
+import iris
+from iris.analysis import AreaWeighted, PointInCell
+from iris.coords import AuxCoord
+
+
+class HorizontalChunkedRegridding:
+    def setup(self) -> None:
+        # Prepare a cube and a template
+
+        cube_file_path = tests.get_data_path(["NetCDF", "regrid", "regrid_xyt.nc"])
+        self.cube = iris.load_cube(cube_file_path)
+
+        # Prepare a tougher cube and chunk it
+        chunked_cube_file_path = tests.get_data_path(
+            ["NetCDF", "regrid", "regrid_xyt.nc"]
+        )
+        self.chunked_cube = iris.load_cube(chunked_cube_file_path)
+
+        # Chunked data makes the regridder run repeatedly
+        self.chunked_cube.data = self.chunked_cube.lazy_data().rechunk((1, -1, -1))
+
+        template_file_path = tests.get_data_path(
+            ["NetCDF", "regrid", "regrid_template_global_latlon.nc"]
+        )
+        self.template_cube = iris.load_cube(template_file_path)
+
+        # Prepare a regridding scheme
+        self.scheme_area_w = AreaWeighted()
+
+    def time_regrid_area_w(self) -> None:
+        # Regrid the cube onto the template.
+        out = self.cube.regrid(self.template_cube, self.scheme_area_w)
+        # Realise the data
+        out.data
+
+    def time_regrid_area_w_new_grid(self) -> None:
+        # Regrid the chunked cube
+        out = self.chunked_cube.regrid(self.template_cube, self.scheme_area_w)
+        # Realise data
+        out.data
+
+    def tracemalloc_regrid_area_w(self) -> None:
+        # Regrid the (unchunked) cube
+        out = self.cube.regrid(self.template_cube, self.scheme_area_w)
+        # Realise data
+        out.data
+
+    tracemalloc_regrid_area_w.number = 3  # type: ignore[attr-defined]
+
+    def tracemalloc_regrid_area_w_new_grid(self) -> None:
+        # Regrid the chunked cube
+        out = self.chunked_cube.regrid(self.template_cube, self.scheme_area_w)
+        # Realise data
+        out.data
+
+    tracemalloc_regrid_area_w_new_grid.number = 3  # type: ignore[attr-defined]
+
+
+class CurvilinearRegridding:
+    def setup(self) -> None:
+        # Prepare a cube and a template
+
+        cube_file_path = tests.get_data_path(["NetCDF", "regrid", "regrid_xyt.nc"])
+        self.cube = iris.load_cube(cube_file_path)
+
+        # Make the source cube curvilinear
+        x_coord = self.cube.coord("longitude")
+        y_coord = self.cube.coord("latitude")
+        xx, yy = np.meshgrid(x_coord.points, y_coord.points)
+        self.cube.remove_coord(x_coord)
+        self.cube.remove_coord(y_coord)
+        x_coord_2d = AuxCoord(
+            xx,
+            standard_name=x_coord.standard_name,
+            units=x_coord.units,
+            coord_system=x_coord.coord_system,
+        )
+        y_coord_2d = AuxCoord(
+            yy,
+            standard_name=y_coord.standard_name,
+            units=y_coord.units,
+            coord_system=y_coord.coord_system,
+        )
+        self.cube.add_aux_coord(x_coord_2d, (1, 2))
+        self.cube.add_aux_coord(y_coord_2d, (1, 2))
+
+        template_file_path = tests.get_data_path(
+            ["NetCDF", "regrid", "regrid_template_global_latlon.nc"]
+        )
+        self.template_cube = iris.load_cube(template_file_path)
+
+        # Prepare a regridding scheme
+        self.scheme_pic = PointInCell()
+
+    def time_regrid_pic(self) -> None:
+        # Regrid the cube onto the template.
+        out = self.cube.regrid(self.template_cube, self.scheme_pic)
+        # Realise the data
+        out.data
+
+    def tracemalloc_regrid_pic(self) -> None:
+        # Regrid the cube onto the template.
+        out = self.cube.regrid(self.template_cube, self.scheme_pic)
+        # Realise the data
+        out.data
+
+    tracemalloc_regrid_pic.number = 3  # type: ignore[attr-defined]
diff --git a/benchmarks/benchmarks/save.py b/benchmarks/benchmarks/save.py
new file mode 100644
index 0000000000..4bac1b1450
--- /dev/null
+++ b/benchmarks/benchmarks/save.py
@@ -0,0 +1,43 @@
+# Copyright Iris contributors
+#
+# This file is part of Iris and is released under the BSD license.
+# See LICENSE in the root of the repository for full licensing details.
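+# (Note: ``iris.save`` infers the NetCDF saver from the ".nc" extension of
+# the target filenames used below.)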
+"""File saving benchmarks.""" + +from iris import save +from iris.mesh import save_mesh + +from .generate_data.ugrid import make_cube_like_2d_cubesphere + + +class NetcdfSave: + params = [[50, 600], [False, True]] + param_names = ["cubesphere-N", "is_unstructured"] + + def setup(self, n_cubesphere, is_unstructured): + self.cube = make_cube_like_2d_cubesphere( + n_cube=n_cubesphere, with_mesh=is_unstructured + ) + + def _save_data(self, cube, do_copy=True): + if do_copy: + # Copy the cube, to avoid distorting the results by changing it + # Because we known that older Iris code realises lazy coords + cube = cube.copy() + save(cube, "tmp.nc") + + def _save_mesh(self, cube): + # In this case, we are happy that the mesh is *not* modified + save_mesh(cube.mesh, "mesh.nc") + + def time_netcdf_save_cube(self, n_cubesphere, is_unstructured): + self._save_data(self.cube) + + def time_netcdf_save_mesh(self, n_cubesphere, is_unstructured): + if is_unstructured: + self._save_mesh(self.cube) + + def tracemalloc_netcdf_save(self, n_cubesphere, is_unstructured): + # Don't need to copy the cube here since track_ benchmarks don't + # do repeats between self.setup() calls. + self._save_data(self.cube, do_copy=False) diff --git a/benchmarks/benchmarks/sperf/__init__.py b/benchmarks/benchmarks/sperf/__init__.py new file mode 100644 index 0000000000..2b8b508fd5 --- /dev/null +++ b/benchmarks/benchmarks/sperf/__init__.py @@ -0,0 +1,38 @@ +# Copyright Iris contributors +# +# This file is part of Iris and is released under the BSD license. +# See LICENSE in the root of the repository for full licensing details. +"""Benchmarks for the SPerf scheme of the UK Met Office's NG-VAT project. + +SPerf = assessing performance against a series of increasingly large LFRic +datasets. +""" + +from iris import load_cube + +from ..generate_data.ugrid import make_cubesphere_testfile + + +class FileMixin: + """For use in any benchmark classes that work on a file.""" + + # Allows time for large file generation. + timeout = 3600.0 + # Largest file with these params: ~90GB. + # Total disk space: ~410GB. + params = [ + [12, 384, 640, 960, 1280, 1668], + [1, 36, 72], + [1, 3, 10], + ] + param_names = ["cubesphere_C", "N levels", "N time steps"] + # cubesphere_C: notation refers to faces per panel. + # e.g. C1 is 6 faces, 8 nodes + + def setup(self, c_size, n_levels, n_times): + self.file_path = make_cubesphere_testfile( + c_size=c_size, n_levels=n_levels, n_times=n_times + ) + + def load_cube(self): + return load_cube(str(self.file_path)) diff --git a/benchmarks/benchmarks/sperf/combine_regions.py b/benchmarks/benchmarks/sperf/combine_regions.py new file mode 100644 index 0000000000..591b7bb9be --- /dev/null +++ b/benchmarks/benchmarks/sperf/combine_regions.py @@ -0,0 +1,234 @@ +# Copyright Iris contributors +# +# This file is part of Iris and is released under the BSD license. +# See LICENSE in the root of the repository for full licensing details. +"""Region combine benchmarks for the SPerf scheme of the UK Met Office's NG-VAT project.""" + +import os.path + +from dask import array as da +import numpy as np + +from iris import load, load_cube, save +from iris.mesh.utils import recombine_submeshes + +from .. import on_demand_benchmark +from ..generate_data.ugrid import BENCHMARK_DATA, make_cube_like_2d_cubesphere + + +class Mixin: + # Characterise time taken + memory-allocated, for various stages of combine + # operations on cubesphere-like test data. 
+    timeout = 300.0
+    params = [100, 200, 300, 500, 1000, 1668]
+    param_names = ["cubesphere_C"]
+    # Fix result units for the tracking benchmarks.
+    unit = "Mb"
+    temp_save_path = BENCHMARK_DATA / "tmp.nc"
+
+    def _parametrised_cache_filename(self, n_cubesphere, content_name):
+        return BENCHMARK_DATA / f"cube_C{n_cubesphere}_{content_name}.nc"
+
+    def _make_region_cubes(self, full_mesh_cube):
+        """Make a fixed number of region cubes from a full meshcube."""
+        # Divide the cube into regions.
+        n_faces = full_mesh_cube.shape[-1]
+        # Start with a simple list of face indices
+        # first extend to multiple of 5
+        n_faces_5s = 5 * ((n_faces + 1) // 5)
+        i_faces = np.arange(n_faces_5s, dtype=int)
+        # reshape (5N,) to (N, 5)
+        i_faces = i_faces.reshape((n_faces_5s // 5, 5))
+        # reorder [2, 3, 4, 0, 1] within each block of 5
+        i_faces = np.concatenate([i_faces[:, 2:], i_faces[:, :2]], axis=1)
+        # flatten to get [2 3 4 0 1 (-) 7 8 9 5 6 (-) 12 13 14 10 11 ...]
+        i_faces = i_faces.flatten()
+        # reduce back to original length, wrap any overflows into valid range
+        i_faces = i_faces[:n_faces] % n_faces
+
+        # Divide into regions -- always slightly uneven, since 7 doesn't
+        # divide n_faces exactly
+        n_regions = 7
+        n_facesperregion = n_faces // n_regions
+        i_face_regions = (i_faces // n_facesperregion) % n_regions
+        region_inds = [
+            np.where(i_face_regions == i_region)[0] for i_region in range(n_regions)
+        ]
+        # NOTE: this produces 7 regions, with near-adjacent value ranges but
+        # with some points "moved" to an adjacent region.
+        # Also, region-0 is bigger (because of not dividing by 7).
+
+        # Finally, make region cubes with these indices.
+        region_cubes = [full_mesh_cube[..., inds] for inds in region_inds]
+        return region_cubes
+
+    def setup_cache(self):
+        """Cache all the necessary source data on disk."""
+        # Control dask, to minimise memory usage + allow largest data.
+        self.fix_dask_settings()
+
+        for n_cubesphere in self.params:
+            # Do for each parameter, since "setup_cache" is NOT parametrised
+            mesh_cube = make_cube_like_2d_cubesphere(
+                n_cube=n_cubesphere, with_mesh=True
+            )
+            # Save to files which include the parameter in the names.
+            save(
+                mesh_cube,
+                self._parametrised_cache_filename(n_cubesphere, "meshcube"),
+            )
+            region_cubes = self._make_region_cubes(mesh_cube)
+            save(
+                region_cubes,
+                self._parametrised_cache_filename(n_cubesphere, "regioncubes"),
+            )
+
+    def setup(self, n_cubesphere, imaginary_data=True, create_result_cube=True):
+        """Combine tests "standard" setup operation.
+
+        Load the source cubes (full-mesh + region) from disk.
+        These are specific to the cubesize parameter.
+        The data is cached on disk rather than calculated, to avoid any
+        pre-loading of the process memory allocation.
+
+        If 'imaginary_data' is set (default), the region cubes data is replaced
+        with lazy data in the form of a da.zeros(). Otherwise, the region data
+        is lazy data from the files.
+
+        If 'create_result_cube' is set, create "self.recombined_cube"
+        containing the (still lazy) result.
+
+        NOTE: various test classes override + extend this.
+
+        """
+        # Load source cubes (full-mesh and regions)
+        self.full_mesh_cube = load_cube(
+            self._parametrised_cache_filename(n_cubesphere, "meshcube")
+        )
+        self.region_cubes = load(
+            self._parametrised_cache_filename(n_cubesphere, "regioncubes")
+        )
+
+        # Remove all var-names from loaded cubes, which can otherwise cause
+        # problems. Also implement 'imaginary' data.
+        for cube in self.region_cubes + [self.full_mesh_cube]:
+            cube.var_name = None
+            for coord in cube.coords():
+                coord.var_name = None
+            if imaginary_data:
+                # Replace cube data (lazy file data) with 'imaginary' data.
+                # This has the same lazy-array attributes, but is allocated by
+                # creating chunks on demand instead of loading from file.
+                data = cube.lazy_data()
+                data = da.zeros(data.shape, dtype=data.dtype, chunks=data.chunksize)
+                cube.data = data
+
+        if create_result_cube:
+            self.recombined_cube = self.recombine()
+
+        # Fix dask usage mode for all the subsequent performance tests.
+        self.fix_dask_settings()
+
+    def teardown(self, _):
+        self.temp_save_path.unlink(missing_ok=True)
+
+    def fix_dask_settings(self):
+        """Fix "standard" dask behaviour for time+space testing.
+
+        Currently this is single-threaded mode, with known chunksize,
+        which is optimised for space saving so we can test largest data.
+
+        """
+        import dask.config as dcfg
+
+        # Use single-threaded, to avoid process-switching costs and minimise memory usage.
+        # N.B. generally may be slower, but uses less memory.
+        dcfg.set(scheduler="single-threaded")
+        # Configure iris._lazy_data.as_lazy_data to aim for 128 MiB chunks
+        dcfg.set({"array.chunk-size": "128Mib"})
+
+    def recombine(self):
+        # A handy general shorthand for the main "combine" operation.
+        result = recombine_submeshes(
+            self.full_mesh_cube,
+            self.region_cubes,
+            index_coord_name="i_mesh_face",
+        )
+        return result
+
+    def save_recombined_cube(self):
+        save(self.recombined_cube, self.temp_save_path)
+
+
+@on_demand_benchmark
+class CreateCube(Mixin):
+    """Time+memory costs of creating a combined-regions cube.
+
+    The result is lazy, and we don't do the actual calculation.
+
+    """
+
+    def setup(self, n_cubesphere, imaginary_data=True, create_result_cube=False):
+        # In this case only, do *not* create the result cube.
+        # That is the operation we want to test.
+        super().setup(n_cubesphere, imaginary_data, create_result_cube)
+
+    def time_create_combined_cube(self, n_cubesphere):
+        self.recombine()
+
+    def tracemalloc_create_combined_cube(self, n_cubesphere):
+        self.recombine()
+
+
+@on_demand_benchmark
+class ComputeRealData(Mixin):
+    """Time+memory costs of computing combined-regions data."""
+
+    def time_compute_data(self, n_cubesphere):
+        _ = self.recombined_cube.data
+
+    def tracemalloc_compute_data(self, n_cubesphere):
+        _ = self.recombined_cube.data
+
+
+@on_demand_benchmark
+class SaveData(Mixin):
+    """Test saving *only*.
+
+    Test saving *only*, having replaced the input cube data with 'imaginary'
+    array data, so that input data is not loaded from disk during the save
+    operation.
+
+    """
+
+    def time_save(self, n_cubesphere):
+        # Save to disk, which must compute data + stream it to file.
+        self.save_recombined_cube()
+
+    def tracemalloc_save(self, n_cubesphere):
+        self.save_recombined_cube()
+
+    def track_filesize_saved(self, n_cubesphere):
+        self.save_recombined_cube()
+        return self.temp_save_path.stat().st_size * 1.0e-6
+
+
+@on_demand_benchmark
+class FileStreamedCalc(Mixin):
+    """Test the whole cost of file-to-file streaming.
+
+    Uses the combined cube which is based on lazy data loading from the region
+    cubes on disk.
+
+    """
+
+    def setup(self, n_cubesphere, imaginary_data=False, create_result_cube=True):
+        # In this case only, do *not* replace the loaded regions data with
+        # 'imaginary' data, as we want to test file-to-file calculation+save.
+ super().setup(n_cubesphere, imaginary_data, create_result_cube) + + def time_stream_file2file(self, n_cubesphere): + # Save to disk, which must compute data + stream it to file. + self.save_recombined_cube() + + def tracemalloc_stream_file2file(self, n_cubesphere): + self.save_recombined_cube() diff --git a/benchmarks/benchmarks/sperf/equality.py b/benchmarks/benchmarks/sperf/equality.py new file mode 100644 index 0000000000..ddee90cd28 --- /dev/null +++ b/benchmarks/benchmarks/sperf/equality.py @@ -0,0 +1,35 @@ +# Copyright Iris contributors +# +# This file is part of Iris and is released under the BSD license. +# See LICENSE in the root of the repository for full licensing details. +"""Equality benchmarks for the SPerf scheme of the UK Met Office's NG-VAT project.""" + +from .. import on_demand_benchmark +from . import FileMixin + + +@on_demand_benchmark +class CubeEquality(FileMixin): + r"""Benchmark time and memory costs. + + Benchmark time and memory costs of comparing :class:`~iris.cube.Cube`\\ s + with attached :class:`~iris.mesh.MeshXY`\\ es. + + Uses :class:`FileMixin` as the realistic case will be comparing + :class:`~iris.cube.Cube`\\ s that have been loaded from file. + + """ + + # Cut down paremt parameters. + params = [FileMixin.params[0]] + + def setup(self, c_size, n_levels=1, n_times=1): + super().setup(c_size, n_levels, n_times) + self.cube = self.load_cube() + self.other_cube = self.load_cube() + + def peakmem_eq(self, n_cube): + _ = self.cube == self.other_cube + + def time_eq(self, n_cube): + _ = self.cube == self.other_cube diff --git a/benchmarks/benchmarks/sperf/load.py b/benchmarks/benchmarks/sperf/load.py new file mode 100644 index 0000000000..d304a30c82 --- /dev/null +++ b/benchmarks/benchmarks/sperf/load.py @@ -0,0 +1,27 @@ +# Copyright Iris contributors +# +# This file is part of Iris and is released under the BSD license. +# See LICENSE in the root of the repository for full licensing details. +"""File loading benchmarks for the SPerf scheme of the UK Met Office's NG-VAT project.""" + +from .. import on_demand_benchmark +from . import FileMixin + + +@on_demand_benchmark +class Load(FileMixin): + def time_load_cube(self, _, __, ___): + _ = self.load_cube() + + +@on_demand_benchmark +class Realise(FileMixin): + def setup(self, c_size, n_levels, n_times): + super().setup(c_size, n_levels, n_times) + self.loaded_cube = self.load_cube() + + def time_realise_cube(self, _, __, ___): + # Don't touch loaded_cube.data - permanent realisation plays badly with + # ASV's re-run strategy. + assert self.loaded_cube.has_lazy_data() + self.loaded_cube.core_data().compute() diff --git a/benchmarks/benchmarks/sperf/save.py b/benchmarks/benchmarks/sperf/save.py new file mode 100644 index 0000000000..a715ec2424 --- /dev/null +++ b/benchmarks/benchmarks/sperf/save.py @@ -0,0 +1,50 @@ +# Copyright Iris contributors +# +# This file is part of Iris and is released under the BSD license. +# See LICENSE in the root of the repository for full licensing details. +"""File saving benchmarks for the SPerf scheme of the UK Met Office's NG-VAT project.""" + +import os.path + +from iris import save +from iris.mesh import save_mesh + +from .. 
import on_demand_benchmark +from ..generate_data.ugrid import make_cube_like_2d_cubesphere + + +@on_demand_benchmark +class NetcdfSave: + """Benchmark time and memory costs of saving ~large-ish data cubes to netcdf.""" + + params = [[1, 100, 200, 300, 500, 1000, 1668], [False, True]] + param_names = ["cubesphere_C", "is_unstructured"] + # Fix result units for the tracking benchmarks. + unit = "Mb" + + def setup(self, n_cubesphere, is_unstructured): + self.cube = make_cube_like_2d_cubesphere( + n_cube=n_cubesphere, with_mesh=is_unstructured + ) + + def _save_cube(self, cube): + save(cube, "tmp.nc") + + def _save_mesh(self, cube): + save_mesh(cube.mesh, "mesh.nc") + + def time_save_cube(self, n_cubesphere, is_unstructured): + self._save_cube(self.cube) + + def tracemalloc_save_cube(self, n_cubesphere, is_unstructured): + self._save_cube(self.cube) + + def time_save_mesh(self, n_cubesphere, is_unstructured): + if is_unstructured: + self._save_mesh(self.cube) + + # The filesizes make a good reference point for the 'addedmem' memory + # usage results. + def track_filesize_save_cube(self, n_cubesphere, is_unstructured): + self._save_cube(self.cube) + return os.path.getsize("tmp.nc") * 1.0e-6 diff --git a/benchmarks/benchmarks/stats.py b/benchmarks/benchmarks/stats.py new file mode 100644 index 0000000000..fbab12cd4b --- /dev/null +++ b/benchmarks/benchmarks/stats.py @@ -0,0 +1,52 @@ +# Copyright Iris contributors +# +# This file is part of Iris and is released under the BSD license. +# See LICENSE in the root of the repository for full licensing details. +"""Stats benchmark tests.""" + +import iris +from iris.analysis.stats import pearsonr +import iris.tests + + +class PearsonR: + def setup(self): + cube_temp = iris.load_cube( + iris.tests.get_data_path( + ("NetCDF", "global", "xyt", "SMALL_total_column_co2.nc") + ) + ) + + # Make data non-lazy. + cube_temp.data + + self.cube_a = cube_temp[:6] + self.cube_b = cube_temp[20:26] + self.cube_b.replace_coord(self.cube_a.coord("time")) + for name in ["latitude", "longitude"]: + self.cube_b.coord(name).guess_bounds() + self.weights = iris.analysis.cartography.area_weights(self.cube_b) + + def time_real(self): + pearsonr(self.cube_a, self.cube_b, weights=self.weights) + + def tracemalloc_real(self): + pearsonr(self.cube_a, self.cube_b, weights=self.weights) + + tracemalloc_real.number = 3 # type: ignore[attr-defined] + + def time_lazy(self): + for cube in self.cube_a, self.cube_b: + cube.data = cube.lazy_data() + + result = pearsonr(self.cube_a, self.cube_b, weights=self.weights) + result.data + + def tracemalloc_lazy(self): + for cube in self.cube_a, self.cube_b: + cube.data = cube.lazy_data() + + result = pearsonr(self.cube_a, self.cube_b, weights=self.weights) + result.data + + tracemalloc_lazy.number = 3 # type: ignore[attr-defined] diff --git a/benchmarks/benchmarks/trajectory.py b/benchmarks/benchmarks/trajectory.py new file mode 100644 index 0000000000..77825ef2f2 --- /dev/null +++ b/benchmarks/benchmarks/trajectory.py @@ -0,0 +1,56 @@ +# Copyright Iris contributors +# +# This file is part of Iris and is released under the BSD license. +# See LICENSE in the root of the repository for full licensing details. 
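An editorial aside before the trajectory benchmarks: the method-name prefixes used throughout these modules (``time_``, ``tracemalloc_``, ``track_``) are what tell ASV how to measure each operation. A minimal, hypothetical sketch of the conventions (class and method names invented for illustration):

```python
import numpy as np


class PrefixDemo:  # hypothetical, for illustration only
    params = [1_000, 1_000_000]
    param_names = ["n"]

    def setup(self, n):
        self.data = np.zeros(n)

    def time_sum(self, n):
        # "time_" prefix -> ASV measures wall-clock time.
        self.data.sum()

    def tracemalloc_sum(self, n):
        # "tracemalloc_" prefix -> handled by the custom benchmark class
        # registered in benchmarks/custom_bms (measures memory growth).
        self.data.sum()

    def track_nbytes(self, n):
        # "track_" prefix -> ASV records the returned value.
        return self.data.nbytes * 1.0e-6

    track_nbytes.unit = "Mb"  # units label for the tracked value
```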
+"""Trajectory benchmark test.""" + +# import iris tests first so that some things can be initialised before +# importing anything else +from iris import tests # isort:skip + +import numpy as np + +import iris +from iris.analysis.trajectory import interpolate + + +class TrajectoryInterpolation: + def setup(self) -> None: + # Prepare a cube and a template + + cube_file_path = tests.get_data_path(["NetCDF", "regrid", "regrid_xyt.nc"]) + self.cube = iris.load_cube(cube_file_path) + + trajectory = np.array([np.array((-50 + i, -50 + i)) for i in range(100)]) + self.sample_points = [ + ("longitude", trajectory[:, 0]), + ("latitude", trajectory[:, 1]), + ] + + def time_trajectory_linear(self) -> None: + # Regrid the cube onto the template. + out_cube = interpolate(self.cube, self.sample_points, method="linear") + # Realise the data + out_cube.data + + def tracemalloc_trajectory_linear(self) -> None: + # Regrid the cube onto the template. + out_cube = interpolate(self.cube, self.sample_points, method="linear") + # Realise the data + out_cube.data + + tracemalloc_trajectory_linear.number = 3 # type: ignore[attr-defined] + + def time_trajectory_nearest(self) -> None: + # Regrid the cube onto the template. + out_cube = interpolate(self.cube, self.sample_points, method="nearest") + # Realise the data + out_cube.data + + def tracemalloc_trajectory_nearest(self) -> None: + # Regrid the cube onto the template. + out_cube = interpolate(self.cube, self.sample_points, method="nearest") + # Realise the data + out_cube.data + + tracemalloc_trajectory_nearest.number = 3 # type: ignore[attr-defined] diff --git a/benchmarks/benchmarks/unit_style/__init__disabled.py b/benchmarks/benchmarks/unit_style/__init__disabled.py new file mode 100644 index 0000000000..d7f84c2b91 --- /dev/null +++ b/benchmarks/benchmarks/unit_style/__init__disabled.py @@ -0,0 +1,16 @@ +# Copyright Iris contributors +# +# This file is part of Iris and is released under the BSD license. +# See LICENSE in the root of the repository for full licensing details. +"""Small-scope benchmarks that can help with performance investigations. + +By renaming ``__init__.py`` these are all disabled by default: + +- They bloat benchmark run-time. +- They are too vulnerable to 'noise' due to their small scope - small objects, + short operations - they report a lot of false positive regressions. +- We rely on the wider-scope integration-style benchmarks to flag performance + changes, upon which we expect to do some manual investigation - these + smaller benchmarks can be run then. + +""" diff --git a/benchmarks/benchmarks/unit_style/aux_factory.py b/benchmarks/benchmarks/unit_style/aux_factory.py new file mode 100644 index 0000000000..329a2b0bda --- /dev/null +++ b/benchmarks/benchmarks/unit_style/aux_factory.py @@ -0,0 +1,52 @@ +# Copyright Iris contributors +# +# This file is part of Iris and is released under the BSD license. +# See LICENSE in the root of the repository for full licensing details. +"""Small-scope AuxFactory benchmark tests.""" + +import numpy as np + +from iris import aux_factory, coords + + +class FactoryCommon: + # TODO: once https://github.com/airspeed-velocity/asv/pull/828 is released: + # * make class an ABC + # * remove NotImplementedError + # * combine setup_common into setup + """Run a generalised suite of benchmarks for any factory. + + A base class running a generalised suite of benchmarks for any factory. + Factory to be specified in a subclass. + + ASV will run the benchmarks within this class for any subclasses. 
+
+    Should only be instantiated within subclasses, but cannot enforce this
+    since ASV cannot handle classes that include abstract methods.
+    """
+
+    def setup(self):
+        """Prevent ASV instantiating (must therefore override setup() in any subclasses.)."""
+        raise NotImplementedError
+
+    def setup_common(self):
+        """Shared setup code that can be called by subclasses."""
+        self.factory = self.create()
+
+    def time_create(self):
+        """Create an instance of the benchmarked factory.
+
+        Create method is specified in the subclass.
+        """
+        self.create()
+
+
+class HybridHeightFactory(FactoryCommon):
+    def setup(self):
+        data_1d = np.zeros(1000)
+        self.coord = coords.AuxCoord(points=data_1d, units="m")
+
+        self.setup_common()
+
+    def create(self):
+        return aux_factory.HybridHeightFactory(delta=self.coord)
diff --git a/benchmarks/benchmarks/unit_style/coords.py b/benchmarks/benchmarks/unit_style/coords.py
new file mode 100644
index 0000000000..704746f190
--- /dev/null
+++ b/benchmarks/benchmarks/unit_style/coords.py
@@ -0,0 +1,129 @@
+# Copyright Iris contributors
+#
+# This file is part of Iris and is released under the BSD license.
+# See LICENSE in the root of the repository for full licensing details.
+"""Small-scope Coord benchmark tests."""
+
+import numpy as np
+
+from iris import coords
+
+from .. import disable_repeat_between_setup
+
+
+def setup():
+    """General variables needed by multiple benchmark classes."""
+    global data_1d
+
+    data_1d = np.zeros(1000)
+
+
+class CoordCommon:
+    # TODO: once https://github.com/airspeed-velocity/asv/pull/828 is released:
+    #  * make class an ABC
+    #  * remove NotImplementedError
+    #  * combine setup_common into setup
+    """Run a generalised suite of benchmarks for any coord.
+
+    A base class running a generalised suite of benchmarks for any coord.
+    Coord to be specified in a subclass.
+
+    ASV will run the benchmarks within this class for any subclasses.
+
+    Should only be instantiated within subclasses, but cannot enforce this
+    since ASV cannot handle classes that include abstract methods.
+    """
+
+    def setup(self):
+        """Prevent ASV instantiating (must therefore override setup() in any subclasses.)."""
+        raise NotImplementedError
+
+    def setup_common(self):
+        """Shared setup code that can be called by subclasses."""
+        self.component = self.create()
+
+    def time_create(self):
+        """Create an instance of the benchmarked coord.
+
+        Create method is specified in the subclass.
+ """ + self.create() + + +class DimCoord(CoordCommon): + def setup(self): + point_values = np.arange(1000) + bounds = np.array([point_values - 1, point_values + 1]).transpose() + + self.create_kwargs = { + "points": point_values, + "bounds": bounds, + "units": "days since 1970-01-01", + "climatological": True, + } + + self.setup_common() + + def create(self): + return coords.DimCoord(**self.create_kwargs) + + def time_regular(self): + coords.DimCoord.from_regular(0, 1, 1000) + + +class AuxCoord(CoordCommon): + def setup(self): + bounds = np.array([data_1d - 1, data_1d + 1]).transpose() + + self.create_kwargs = { + "points": data_1d, + "bounds": bounds, + "units": "days since 1970-01-01", + "climatological": True, + } + + self.setup_common() + + def create(self): + return coords.AuxCoord(**self.create_kwargs) + + def time_points(self): + _ = self.component.points + + def time_bounds(self): + _ = self.component.bounds + + +@disable_repeat_between_setup +class AuxCoordLazy(AuxCoord): + """Lazy equivalent of :class:`AuxCoord`.""" + + def setup(self): + super().setup() + self.create_kwargs["points"] = self.component.lazy_points() + self.create_kwargs["bounds"] = self.component.lazy_bounds() + self.setup_common() + + +class CellMeasure(CoordCommon): + def setup(self): + self.setup_common() + + def create(self): + return coords.CellMeasure(data_1d) + + +class CellMethod(CoordCommon): + def setup(self): + self.setup_common() + + def create(self): + return coords.CellMethod("test") + + +class AncillaryVariable(CoordCommon): + def setup(self): + self.setup_common() + + def create(self): + return coords.AncillaryVariable(data_1d) diff --git a/benchmarks/benchmarks/unit_style/cube.py b/benchmarks/benchmarks/unit_style/cube.py new file mode 100644 index 0000000000..780418aa14 --- /dev/null +++ b/benchmarks/benchmarks/unit_style/cube.py @@ -0,0 +1,252 @@ +# Copyright Iris contributors +# +# This file is part of Iris and is released under the BSD license. +# See LICENSE in the root of the repository for full licensing details. +"""Small-scope Cube benchmark tests.""" + +import numpy as np + +from iris import analysis, aux_factory, coords, cube + +from .. import disable_repeat_between_setup +from ..generate_data.stock import sample_meshcoord + + +def setup(*params): + """General variables needed by multiple benchmark classes.""" + global data_1d + global data_2d + global general_cube + + data_2d = np.zeros((1000,) * 2) + data_1d = data_2d[0] + general_cube = cube.Cube(data_2d) + + +class ComponentCommon: + # TODO: once https://github.com/airspeed-velocity/asv/pull/828 is released: + # * make class an ABC + # * remove NotImplementedError + # * combine setup_common into setup + """Run a generalised suite of benchmarks for cubes. + + A base class running a generalised suite of benchmarks for cubes that + include a specified component (e.g. Coord, CellMeasure etc.). Component to + be specified in a subclass. + + ASV will run the benchmarks within this class for any subclasses. + + Should only be instantiated within subclasses, but cannot enforce this + since ASV cannot handle classes that include abstract methods. + """ + + def setup(self): + """Prevent ASV instantiating (must therefore override setup() in any subclasses.).""" + raise NotImplementedError + + def create(self): + """Create a cube (generic). + + cube_kwargs allow dynamic inclusion of different components; + specified in subclasses. 
+ """ + return cube.Cube(data=data_2d, **self.cube_kwargs) + + def setup_common(self): + """Shared setup code that can be called by subclasses.""" + self.cube = self.create() + + def time_create(self): + """Create a cube that includes an instance of the benchmarked component.""" + self.create() + + def time_add(self): + """Add an instance of the benchmarked component to an existing cube.""" + # Unable to create the copy during setup since this needs to be re-done + # for every repeat of the test (some components disallow duplicates). + general_cube_copy = general_cube.copy(data=data_2d) + self.add_method(general_cube_copy, *self.add_args) + + +class Cube: + def time_basic(self): + cube.Cube(data_2d) + + def time_rename(self): + general_cube.name = "air_temperature" + + +class AuxCoord(ComponentCommon): + def setup(self): + self.coord_name = "test" + coord_bounds = np.array([data_1d - 1, data_1d + 1]).transpose() + aux_coord = coords.AuxCoord( + long_name=self.coord_name, + points=data_1d, + bounds=coord_bounds, + units="days since 1970-01-01", + climatological=True, + ) + + # Variables needed by the ComponentCommon base class. + self.cube_kwargs = {"aux_coords_and_dims": [(aux_coord, 0)]} + self.add_method = cube.Cube.add_aux_coord + self.add_args = (aux_coord, (0)) + + self.setup_common() + + def time_return_coords(self): + self.cube.coords() + + def time_return_coord_dims(self): + self.cube.coord_dims(self.coord_name) + + +class AuxFactory(ComponentCommon): + def setup(self): + coord = coords.AuxCoord(points=data_1d, units="m") + self.hybrid_factory = aux_factory.HybridHeightFactory(delta=coord) + + # Variables needed by the ComponentCommon base class. + self.cube_kwargs = { + "aux_coords_and_dims": [(coord, 0)], + "aux_factories": [self.hybrid_factory], + } + + self.setup_common() + + # Variables needed by the overridden time_add benchmark in this subclass. + cube_w_coord = self.cube.copy() + [cube_w_coord.remove_aux_factory(i) for i in cube_w_coord.aux_factories] + self.cube_w_coord = cube_w_coord + + def time_add(self): + # Requires override from super().time_add because the cube needs an + # additional coord. + self.cube_w_coord.add_aux_factory(self.hybrid_factory) + + +class CellMeasure(ComponentCommon): + def setup(self): + cell_measure = coords.CellMeasure(data_1d) + + # Variables needed by the ComponentCommon base class. + self.cube_kwargs = {"cell_measures_and_dims": [(cell_measure, 0)]} + self.add_method = cube.Cube.add_cell_measure + self.add_args = (cell_measure, 0) + + self.setup_common() + + +class CellMethod(ComponentCommon): + def setup(self): + cell_method = coords.CellMethod("test") + + # Variables needed by the ComponentCommon base class. + self.cube_kwargs = {"cell_methods": [cell_method]} + self.add_method = cube.Cube.add_cell_method + self.add_args = [cell_method] + + self.setup_common() + + +class AncillaryVariable(ComponentCommon): + def setup(self): + ancillary_variable = coords.AncillaryVariable(data_1d) + + # Variables needed by the ComponentCommon base class. 
+ self.cube_kwargs = {"ancillary_variables_and_dims": [(ancillary_variable, 0)]} + self.add_method = cube.Cube.add_ancillary_variable + self.add_args = (ancillary_variable, 0) + + self.setup_common() + + +class MeshCoord: + params = [ + 6, # minimal cube-sphere + int(1e6), # realistic cube-sphere size + 1000, # To match size in :class:`AuxCoord` + ] + param_names = ["number of faces"] + + def setup(self, n_faces): + mesh_kwargs = dict(n_nodes=n_faces + 2, n_edges=n_faces * 2, n_faces=n_faces) + + self.mesh_coord = sample_meshcoord(sample_mesh_kwargs=mesh_kwargs) + self.data = np.zeros(n_faces) + self.cube_blank = cube.Cube(data=self.data) + self.cube = self.create() + + def create(self): + return cube.Cube(data=self.data, aux_coords_and_dims=[(self.mesh_coord, 0)]) + + def time_create(self, n_faces): + _ = self.create() + + @disable_repeat_between_setup + def time_add(self, n_faces): + self.cube_blank.add_aux_coord(self.mesh_coord, 0) + + @disable_repeat_between_setup + def time_remove(self, n_faces): + self.cube.remove_coord(self.mesh_coord) + + +class Merge: + def setup(self): + self.cube_list = cube.CubeList() + for i in np.arange(2): + i_cube = general_cube.copy() + i_coord = coords.AuxCoord([i]) + i_cube.add_aux_coord(i_coord) + self.cube_list.append(i_cube) + + def time_merge(self): + self.cube_list.merge() + + +class Concatenate: + def setup(self): + dim_size = 1000 + self.cube_list = cube.CubeList() + for i in np.arange(dim_size * 2, step=dim_size): + i_cube = general_cube.copy() + i_coord = coords.DimCoord(np.arange(dim_size) + (i * dim_size)) + i_cube.add_dim_coord(i_coord, 0) + self.cube_list.append(i_cube) + + def time_concatenate(self): + self.cube_list.concatenate() + + +class Equality: + def setup(self): + self.cube_a = general_cube.copy() + self.cube_b = general_cube.copy() + + aux_coord = coords.AuxCoord(data_1d) + self.cube_a.add_aux_coord(aux_coord, 0) + self.cube_b.add_aux_coord(aux_coord, 1) + + def time_equality(self): + self.cube_a == self.cube_b + + +class Aggregation: + def setup(self): + repeat_number = 10 + repeat_range = range(int(1000 / repeat_number)) + array_repeat = np.repeat(repeat_range, repeat_number) + array_unique = np.arange(len(array_repeat)) + + coord_repeat = coords.AuxCoord(points=array_repeat, long_name="repeat") + coord_unique = coords.DimCoord(points=array_unique, long_name="unique") + + local_cube = general_cube.copy() + local_cube.add_aux_coord(coord_repeat, 0) + local_cube.add_dim_coord(coord_unique, 0) + self.cube = local_cube + + def time_aggregated_by(self): + self.cube.aggregated_by("repeat", analysis.MEAN) diff --git a/benchmarks/benchmarks/unit_style/mesh.py b/benchmarks/benchmarks/unit_style/mesh.py new file mode 100644 index 0000000000..ed3aad1428 --- /dev/null +++ b/benchmarks/benchmarks/unit_style/mesh.py @@ -0,0 +1,187 @@ +# Copyright Iris contributors +# +# This file is part of Iris and is released under the BSD license. +# See LICENSE in the root of the repository for full licensing details. +"""Benchmark tests for the iris.mesh module.""" + +from copy import deepcopy + +import numpy as np + +from iris import mesh + +from .. import disable_repeat_between_setup +from ..generate_data.stock import sample_mesh + + +class UGridCommon: + """Run a generalised suite of benchmarks for any mesh object. + + A base class running a generalised suite of benchmarks for any mesh object. + Object to be specified in a subclass. + + ASV will run the benchmarks within this class for any subclasses. 
+ + ASV will not benchmark this class as setup() triggers a NotImplementedError. + (ASV has not yet released ABC/abstractmethod support - asv#838). + + """ + + params = [ + 6, # minimal cube-sphere + int(1e6), # realistic cube-sphere size + ] + param_names = ["number of faces"] + + def setup(self, *params): + self.object = self.create() + + def create(self): + raise NotImplementedError + + def time_create(self, *params): + """Create an instance of the benchmarked object. + + create() method is specified in the subclass. + """ + self.create() + + +class Connectivity(UGridCommon): + def setup(self, n_faces): + self.array = np.zeros([n_faces, 3], dtype=int) + super().setup(n_faces) + + def create(self): + return mesh.Connectivity(indices=self.array, cf_role="face_node_connectivity") + + def time_indices(self, n_faces): + _ = self.object.indices + + def time_location_lengths(self, n_faces): + # Proofed against the Connectivity name change (633ed17). + if getattr(self.object, "src_lengths", False): + meth = self.object.src_lengths + else: + meth = self.object.location_lengths + _ = meth() + + def time_validate_indices(self, n_faces): + self.object.validate_indices() + + +@disable_repeat_between_setup +class ConnectivityLazy(Connectivity): + """Lazy equivalent of :class:`Connectivity`.""" + + def setup(self, n_faces): + super().setup(n_faces) + self.array = self.object.lazy_indices() + self.object = self.create() + + +class MeshXY(UGridCommon): + def setup(self, n_faces, lazy=False): + #### + # Steal everything from the sample mesh for benchmarking creation of a + # brand new mesh. + source_mesh = sample_mesh( + n_nodes=n_faces + 2, + n_edges=n_faces * 2, + n_faces=n_faces, + lazy_values=lazy, + ) + + def get_coords_and_axes(location): + return [ + (source_mesh.coord(axis=axis, location=location), axis) + for axis in ("x", "y") + ] + + self.mesh_kwargs = dict( + topology_dimension=source_mesh.topology_dimension, + node_coords_and_axes=get_coords_and_axes("node"), + connectivities=source_mesh.connectivities(), + edge_coords_and_axes=get_coords_and_axes("edge"), + face_coords_and_axes=get_coords_and_axes("face"), + ) + #### + + super().setup(n_faces) + + self.face_node = self.object.face_node_connectivity + self.node_x = self.object.node_coords.node_x + # Kwargs for reuse in search and remove methods. + self.connectivities_kwarg = dict(cf_role="edge_node_connectivity") + self.coords_kwarg = dict(location="face") + + # TODO: an opportunity for speeding up runtime if needed, since + # eq_object is not needed for all benchmarks. Just don't generate it + # within a benchmark - the execution time is large enough that it + # could be a significant portion of the benchmark - makes regressions + # smaller and could even pick up regressions in copying instead! 
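A brief aside on the rename-proofing seen in `time_location_lengths` above: checking for the old attribute with `getattr` lets the same benchmark source run against commits from both before and after an API rename. A self-contained sketch of the pattern (the demo class is invented):

```python
class _Demo:  # hypothetical stand-in, only has the new-style name
    def location_lengths(self):
        return [4, 4, 4]


obj = _Demo()
# Prefer the old name if present, otherwise fall back to the new one.
meth = getattr(obj, "src_lengths", None) or obj.location_lengths
print(meth())  # -> [4, 4, 4]
```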
+ self.eq_object = deepcopy(self.object) + + def create(self): + return mesh.MeshXY(**self.mesh_kwargs) + + def time_add_connectivities(self, n_faces): + self.object.add_connectivities(self.face_node) + + def time_add_coords(self, n_faces): + self.object.add_coords(node_x=self.node_x) + + def time_connectivities(self, n_faces): + _ = self.object.connectivities(**self.connectivities_kwarg) + + def time_coords(self, n_faces): + _ = self.object.coords(**self.coords_kwarg) + + def time_eq(self, n_faces): + _ = self.object == self.eq_object + + def time_remove_connectivities(self, n_faces): + self.object.remove_connectivities(**self.connectivities_kwarg) + + def time_remove_coords(self, n_faces): + self.object.remove_coords(**self.coords_kwarg) + + +@disable_repeat_between_setup +class MeshXYLazy(MeshXY): + """Lazy equivalent of :class:`MeshXY`.""" + + def setup(self, n_faces, lazy=True): + super().setup(n_faces, lazy=lazy) + + +class MeshCoord(UGridCommon): + # Add extra parameter value to match AuxCoord benchmarking. + params = UGridCommon.params + [1000] + + def setup(self, n_faces, lazy=False): + self.mesh = sample_mesh( + n_nodes=n_faces + 2, + n_edges=n_faces * 2, + n_faces=n_faces, + lazy_values=lazy, + ) + + super().setup(n_faces) + + def create(self): + return mesh.MeshCoord(mesh=self.mesh, location="face", axis="x") + + def time_points(self, n_faces): + _ = self.object.points + + def time_bounds(self, n_faces): + _ = self.object.bounds + + +@disable_repeat_between_setup +class MeshCoordLazy(MeshCoord): + """Lazy equivalent of :class:`MeshCoord`.""" + + def setup(self, n_faces, lazy=True): + super().setup(n_faces, lazy=lazy) diff --git a/benchmarks/benchmarks/unit_style/metadata_manager_factory.py b/benchmarks/benchmarks/unit_style/metadata_manager_factory.py new file mode 100644 index 0000000000..0af055fa82 --- /dev/null +++ b/benchmarks/benchmarks/unit_style/metadata_manager_factory.py @@ -0,0 +1,83 @@ +# Copyright Iris contributors +# +# This file is part of Iris and is released under the BSD license. +# See LICENSE in the root of the repository for full licensing details. 
+"""Small-scope metadata manager factory benchmark tests.""" + +from iris.common import ( + AncillaryVariableMetadata, + BaseMetadata, + CellMeasureMetadata, + CoordMetadata, + CubeMetadata, + DimCoordMetadata, + metadata_manager_factory, +) + + +class MetadataManagerFactory__create: + params = [1, 10, 100] + + def time_AncillaryVariableMetadata(self, n): + [metadata_manager_factory(AncillaryVariableMetadata) for _ in range(n)] + + def time_BaseMetadata(self, n): + [metadata_manager_factory(BaseMetadata) for _ in range(n)] + + def time_CellMeasureMetadata(self, n): + [metadata_manager_factory(CellMeasureMetadata) for _ in range(n)] + + def time_CoordMetadata(self, n): + [metadata_manager_factory(CoordMetadata) for _ in range(n)] + + def time_CubeMetadata(self, n): + [metadata_manager_factory(CubeMetadata) for _ in range(n)] + + def time_DimCoordMetadata(self, n): + [metadata_manager_factory(DimCoordMetadata) for _ in range(n)] + + +class MetadataManagerFactory: + def setup(self): + self.ancillary = metadata_manager_factory(AncillaryVariableMetadata) + self.base = metadata_manager_factory(BaseMetadata) + self.cell = metadata_manager_factory(CellMeasureMetadata) + self.coord = metadata_manager_factory(CoordMetadata) + self.cube = metadata_manager_factory(CubeMetadata) + self.dim = metadata_manager_factory(DimCoordMetadata) + + def time_AncillaryVariableMetadata_fields(self): + self.ancillary.fields + + def time_AncillaryVariableMetadata_values(self): + self.ancillary.values + + def time_BaseMetadata_fields(self): + self.base.fields + + def time_BaseMetadata_values(self): + self.base.values + + def time_CellMeasuresMetadata_fields(self): + self.cell.fields + + def time_CellMeasuresMetadata_values(self): + self.cell.values + + def time_CoordMetadata_fields(self): + self.coord.fields + + def time_CoordMetadata_values(self): + self.coord.values + + def time_CubeMetadata_fields(self): + self.cube.fields + + def time_CubeMetadata_values(self): + self.cube.values + + def time_DimCoordMetadata_fields(self): + self.dim.fields + + def time_DimCoordMetadata_values(self): + self.dim.values diff --git a/benchmarks/benchmarks/unit_style/mixin.py b/benchmarks/benchmarks/unit_style/mixin.py new file mode 100644 index 0000000000..92de5e7ad9 --- /dev/null +++ b/benchmarks/benchmarks/unit_style/mixin.py @@ -0,0 +1,78 @@ +# Copyright Iris contributors +# +# This file is part of Iris and is released under the BSD license. +# See LICENSE in the root of the repository for full licensing details. +"""Small-scope CFVariableMixin benchmark tests.""" + +import numpy as np + +from iris import coords +from iris.common.metadata import AncillaryVariableMetadata + +LONG_NAME = "air temperature" +STANDARD_NAME = "air_temperature" +VAR_NAME = "air_temp" +UNITS = "degrees" +ATTRIBUTES = dict(a=1) +DICT = dict( + standard_name=STANDARD_NAME, + long_name=LONG_NAME, + var_name=VAR_NAME, + units=UNITS, + attributes=ATTRIBUTES, +) +METADATA = AncillaryVariableMetadata(**DICT) +TUPLE = tuple(DICT.values()) + + +class CFVariableMixin: + def setup(self): + data_1d = np.zeros(1000) + + # These benchmarks are from a user perspective, so using a user-level + # subclass of CFVariableMixin to test behaviour. AncillaryVariable is + # the simplest so using that. 
+ self.cfm_proxy = coords.AncillaryVariable(data_1d) + self.cfm_proxy.long_name = "test" + + def time_get_long_name(self): + self.cfm_proxy.long_name + + def time_set_long_name(self): + self.cfm_proxy.long_name = LONG_NAME + + def time_get_standard_name(self): + self.cfm_proxy.standard_name + + def time_set_standard_name(self): + self.cfm_proxy.standard_name = STANDARD_NAME + + def time_get_var_name(self): + self.cfm_proxy.var_name + + def time_set_var_name(self): + self.cfm_proxy.var_name = VAR_NAME + + def time_get_units(self): + self.cfm_proxy.units + + def time_set_units(self): + self.cfm_proxy.units = UNITS + + def time_get_attributes(self): + self.cfm_proxy.attributes + + def time_set_attributes(self): + self.cfm_proxy.attributes = ATTRIBUTES + + def time_get_metadata(self): + self.cfm_proxy.metadata + + def time_set_metadata__dict(self): + self.cfm_proxy.metadata = DICT + + def time_set_metadata__tuple(self): + self.cfm_proxy.metadata = TUPLE + + def time_set_metadata__metadata(self): + self.cfm_proxy.metadata = METADATA diff --git a/benchmarks/bm_runner.py b/benchmarks/bm_runner.py new file mode 100644 index 0000000000..afc08ff6fa --- /dev/null +++ b/benchmarks/bm_runner.py @@ -0,0 +1,660 @@ +# Copyright Iris contributors +# +# This file is part of Iris and is released under the BSD license. +# See LICENSE in the root of the repository for full licensing details. +"""Argparse conveniences for executing common types of benchmark runs.""" + +from abc import ABC, abstractmethod +import argparse +from datetime import datetime +from importlib import import_module +from os import environ +from pathlib import Path +import re +import shlex +import subprocess +from tempfile import NamedTemporaryFile +from textwrap import dedent +from typing import Literal, Protocol + +# The threshold beyond which shifts are 'notable'. See `asv compare`` docs +# for more. +COMPARE_FACTOR = 1.2 + +BENCHMARKS_DIR = Path(__file__).parent +ROOT_DIR = BENCHMARKS_DIR.parent +# Storage location for reports used in GitHub actions. +GH_REPORT_DIR = ROOT_DIR.joinpath(".github", "workflows", "benchmark_reports") + +# Common ASV arguments for all run_types except `custom`. +ASV_HARNESS = "run {posargs} --attribute rounds=4 --interleave-rounds --show-stderr" + + +def echo(echo_string: str): + # Use subprocess for printing to reduce chance of printing out of sequence + # with the subsequent calls. + subprocess.run(["echo", f"BM_RUNNER DEBUG: {echo_string}"]) + + +def _subprocess_runner(args, asv=False, **kwargs): + # Avoid permanent modifications if the same arguments are used more than once. + args = args.copy() + kwargs = kwargs.copy() + if asv: + args.insert(0, "asv") + kwargs["cwd"] = BENCHMARKS_DIR + echo(" ".join(args)) + kwargs.setdefault("check", True) + return subprocess.run(args, **kwargs) + + +def _subprocess_runner_capture(args, **kwargs) -> str: + result = _subprocess_runner(args, capture_output=True, **kwargs) + return result.stdout.decode().rstrip() + + +def _check_requirements(package: str) -> None: + try: + import_module(package) + except ImportError as exc: + message = ( + f"No {package} install detected. Benchmarks can only " + f"be run in an environment including {package}." 
+ ) + raise Exception(message) from exc + + +def _prep_data_gen_env() -> None: + """Create or access a separate, unchanging environment for generating test data.""" + python_version = "3.12" + data_gen_var = "DATA_GEN_PYTHON" + if data_gen_var in environ: + echo("Using existing data generation environment.") + else: + echo("Setting up the data generation environment ...") + # Get Nox to build an environment for the `tests` session, but don't + # run the session. Will reuse a cached environment if appropriate. + _subprocess_runner( + [ + "nox", + f"--noxfile={ROOT_DIR / 'noxfile.py'}", + "--session=tests", + "--install-only", + f"--python={python_version}", + ] + ) + # Find the environment built above, set it to be the data generation + # environment. + data_gen_python = next( + (ROOT_DIR / ".nox").rglob(f"tests*/bin/python{python_version}") + ).resolve() + environ[data_gen_var] = str(data_gen_python) + + def clone_resource(name: str, clone_source: str) -> Path: + resource_dir = data_gen_python.parents[1] / "resources" + resource_dir.mkdir(exist_ok=True) + clone_dir = resource_dir / name + if not clone_dir.is_dir(): + _subprocess_runner(["git", "clone", clone_source, str(clone_dir)]) + return clone_dir + + echo("Installing Mule into data generation environment ...") + mule_dir = clone_resource("mule", "https://github.com/metomi/mule.git") + _subprocess_runner( + [ + str(data_gen_python), + "-m", + "pip", + "install", + str(mule_dir / "mule"), + ] + ) + + test_data_var = "OVERRIDE_TEST_DATA_REPOSITORY" + if test_data_var not in environ: + echo("Installing iris-test-data into data generation environment ...") + test_data_dir = clone_resource( + "iris-test-data", "https://github.com/SciTools/iris-test-data.git" + ) + environ[test_data_var] = str(test_data_dir / "test_data") + + echo("Data generation environment ready.") + + +def _setup_common() -> None: + _check_requirements("asv") + _check_requirements("nox") + + _prep_data_gen_env() + + echo("Setting up ASV ...") + _subprocess_runner(["machine", "--yes"], asv=True) + + echo("Setup complete.") + + +def _asv_compare(*commits: str, overnight_mode: bool = False) -> None: + """Run through a list of commits comparing each one to the next.""" + commits = tuple(commit[:8] for commit in commits) + for i in range(len(commits) - 1): + before = commits[i] + after = commits[i + 1] + asv_command = shlex.split( + f"compare {before} {after} --factor={COMPARE_FACTOR} --split" + ) + + comparison = _subprocess_runner_capture(asv_command, asv=True) + echo(comparison) + shifts = _subprocess_runner_capture([*asv_command, "--only-changed"], asv=True) + + if shifts or (not overnight_mode): + # For the overnight run: only post if there are shifts. + _gh_create_reports(after, comparison, shifts) + + +def _gh_create_reports(commit_sha: str, results_full: str, results_shifts: str) -> None: + """If running under GitHub Actions: record the results in report(s). + + Posting the reports is done by :func:`_gh_post_reports`, which must be run + within a separate action to comply with GHA's security limitations. + """ + if "GITHUB_ACTIONS" not in environ: + # Only run when within GHA. 
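As an aside, the pairwise walk in `_asv_compare` above reduces to a simple zip over successive commits. A sketch with dummy SHAs, printing the commands rather than executing them through `_subprocess_runner`:

```python
import shlex

commits = ["1a2b3c4d", "5e6f7a8b", "9c0d1e2f"]  # dummy abbreviated SHAs
for before, after in zip(commits, commits[1:]):
    cmd = shlex.split(f"compare {before} {after} --factor=1.2 --split")
    print(["asv", *cmd])  # what _subprocess_runner(cmd, asv=True) would run
```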
+ return + + pr_number = environ.get("PR_NUMBER", None) + on_pull_request = pr_number is not None + run_id = environ["GITHUB_RUN_ID"] + repo = environ["GITHUB_REPOSITORY"] + gha_run_link = f"[`{run_id}`](https://github.com/{repo}/actions/runs/{run_id})" + + GH_REPORT_DIR.mkdir(exist_ok=True) + commit_dir = GH_REPORT_DIR / commit_sha + commit_dir.mkdir() + command_path = commit_dir / "command.txt" + body_path = commit_dir / "body.txt" + + performance_report = dedent( + ( + """ + # :stopwatch: Performance Benchmark Report: {commit_sha} + +
+            <details>
+            <summary>Performance shifts</summary>
+
+            ```
+            {results_shifts}
+            ```
+
+            </details>
+
+            <details>
+            <summary>Full benchmark results</summary>
+
+            ```
+            {results_full}
+            ```
+
+            </details>
+ + Generated by GHA run {gha_run_link} + """ + ) + ) + performance_report = performance_report.format( + commit_sha=commit_sha, + results_shifts=results_shifts, + results_full=results_full, + gha_run_link=gha_run_link, + ) + + if on_pull_request: + # Command to post the report as a comment on the active PR. + body_path.write_text(performance_report) + command = ( + f"gh pr comment {pr_number} " + f"--body-file {body_path.absolute()} " + f"--repo {repo}" + ) + command_path.write_text(command) + + else: + # Command to post the report as new issue. + commit_msg = _subprocess_runner_capture( + f"git log {commit_sha}^! --oneline".split(" ") + ) + # Intended for benchmarking commits on trunk - should include a PR + # number due to our squash policy. + pr_tag_match = re.search("#[0-9]*", commit_msg) + + assignee = "" + pr_tag = "pull request number unavailable" + if pr_tag_match is not None: + pr_tag = pr_tag_match.group(0) + + for login_type in ("author", "mergedBy"): + gh_query = f'.["{login_type}"]["login"]' + commandlist = shlex.split( + f"gh pr view {pr_tag[1:]} " + f"--json {login_type} -q '{gh_query}' " + f"--repo {repo}" + ) + login = _subprocess_runner_capture(commandlist) + + commandlist = [ + "curl", + "-s", + f"https://api.github.com/users/{login}", + ] + login_info = _subprocess_runner_capture(commandlist) + is_user = '"type": "User"' in login_info + if is_user: + assignee = login + break + + title = f"Performance Shift(s): `{commit_sha}`" + body = dedent( + ( + f""" + Benchmark comparison has identified performance shifts at: + + * commit {commit_sha} ({pr_tag}). + +

+ Please review the report below and + take corrective/congratulatory action as appropriate + :slightly_smiling_face: +

+ """ + ) + ) + body += performance_report + body_path.write_text(body) + + command = ( + "gh issue create " + f'--title "{title}" ' + f"--body-file {body_path.absolute()} " + '--label "Bot" ' + '--label "Type: Performance" ' + f"--repo {repo}" + ) + if assignee: + command += f" --assignee {assignee}" + command_path.write_text(command) + + +def _gh_post_reports() -> None: + """If running under GitHub Actions: post pre-prepared benchmark reports. + + Reports are prepared by :func:`_gh_create_reports`, which must be run + within a separate action to comply with GHA's security limitations. + """ + if "GITHUB_ACTIONS" not in environ: + # Only run when within GHA. + return + + commit_dirs = [x for x in GH_REPORT_DIR.iterdir() if x.is_dir()] + for commit_dir in commit_dirs: + command_path = commit_dir / "command.txt" + command = command_path.read_text() + + # Security: only accept certain commands to run. + assert command.startswith(("gh issue create", "gh pr comment")) + + _subprocess_runner(shlex.split(command)) + + +class _SubParserGenerator(ABC): + """Convenience for holding all the necessary argparse info in 1 place.""" + + name: str = NotImplemented + description: str = NotImplemented + epilog: str = NotImplemented + + class _SubParsersType(Protocol): + """Duck typing since argparse._SubParsersAction is private.""" + + def add_parser(self, name, **kwargs) -> argparse.ArgumentParser: ... + + def __init__(self, subparsers: _SubParsersType) -> None: + self.subparser = subparsers.add_parser( + self.name, + description=self.description, + epilog=self.epilog, + formatter_class=argparse.RawTextHelpFormatter, + ) + self.add_arguments() + self.add_asv_arguments() + self.subparser.set_defaults(func=self.func) + + @abstractmethod + def add_arguments(self) -> None: + """All custom self.subparser.add_argument() calls.""" + _ = NotImplemented + + def add_asv_arguments(self) -> None: + self.subparser.add_argument( + "asv_args", + nargs=argparse.REMAINDER, + help="Any number of arguments to pass down to the ASV benchmark command.", + ) + + @staticmethod + @abstractmethod + def func(args: argparse.Namespace): + """Return when the subparser is parsed. + + `func` is then called, performing the user's selected sub-command. + + """ + _ = args + return NotImplemented + + +class Overnight(_SubParserGenerator): + name = "overnight" + description = ( + "Benchmarks all commits between the input **first_commit** to ``HEAD``, " + "comparing each to its parent for performance shifts. If running on " + "GitHub Actions: performance shift(s) will be reported in a new issue.\n" + "Designed for checking the previous 24 hours' commits, typically in a " + "scheduled script.\n" + "Uses `asv run`." + ) + epilog = ( + "e.g. python bm_runner.py overnight a1b23d4\n" + "e.g. python bm_runner.py overnight a1b23d4 --bench=regridding" + ) + + def add_arguments(self) -> None: + self.subparser.add_argument( + "first_commit", + type=str, + help="The first commit in the benchmarking commit sequence.", + ) + + @staticmethod + def func(args: argparse.Namespace) -> None: + _setup_common() + + commit_range = f"{args.first_commit}^^.." + # git rev-list --first-parent is the command ASV uses. 
+ git_command = shlex.split(f"git rev-list --first-parent {commit_range}") + commit_string = _subprocess_runner_capture(git_command) + commit_list = commit_string.split("\n") + + asv_command = shlex.split(ASV_HARNESS.format(posargs=commit_range)) + try: + _subprocess_runner([*asv_command, *args.asv_args], asv=True) + finally: + # Designed for long running - want to compare/post any valid + # results even if some are broken. + _asv_compare(*reversed(commit_list), overnight_mode=True) + + +class Branch(_SubParserGenerator): + name = "branch" + description = ( + "Performs the same operations as ``overnight``, but always on two commits " + "only - ``HEAD``, and ``HEAD``'s merge-base with the input " + "**base_branch**. If running on GitHub Actions: HEAD will be GitHub's " + "merge commit and merge-base will be the merge target. Performance " + "comparisons will be posted in a comment on the relevant pull request.\n" + "Designed " + "for testing if the active branch's changes cause performance shifts - " + "anticipating what would be caught by ``overnight`` once merged.\n\n" + "**For maximum accuracy, avoid using the machine that is running this " + "session. Run time could be >1 hour for the full benchmark suite.**\n" + "Uses `asv run`." + ) + epilog = ( + "e.g. python bm_runner.py branch upstream/main\n" + "e.g. python bm_runner.py branch upstream/main --bench=regridding" + ) + + def add_arguments(self) -> None: + self.subparser.add_argument( + "base_branch", + type=str, + help="A branch that has the merge-base with ``HEAD`` - ``HEAD`` will be benchmarked against that merge-base.", + ) + + @staticmethod + def func(args: argparse.Namespace) -> None: + _setup_common() + + git_command = shlex.split("git rev-parse HEAD") + head_sha = _subprocess_runner_capture(git_command)[:8] + + git_command = shlex.split(f"git merge-base {head_sha} {args.base_branch}") + merge_base = _subprocess_runner_capture(git_command)[:8] + + with NamedTemporaryFile("w") as hashfile: + hashfile.writelines([merge_base, "\n", head_sha]) + hashfile.flush() + commit_range = f"HASHFILE:{hashfile.name}" + asv_command = shlex.split(ASV_HARNESS.format(posargs=commit_range)) + _subprocess_runner([*asv_command, *args.asv_args], asv=True) + + _asv_compare(merge_base, head_sha) + + +class _CSPerf(_SubParserGenerator, ABC): + """Common code used by both CPerf and SPerf.""" + + description = ( + "Run the on-demand {} suite of benchmarks (part of the UK Met " + "Office NG-VAT project) for the ``HEAD`` of ``upstream/main`` only, " + "and publish the results to the input **publish_dir**, within a " + "unique subdirectory for this run.\n" + "Uses `asv run`." + ) + epilog = ( + "e.g. python bm_runner.py {0} my_publish_dir\n" + "e.g. python bm_runner.py {0} my_publish_dir --bench=regridding" + ) + + def add_arguments(self) -> None: + self.subparser.add_argument( + "publish_dir", + type=str, + help="HTML results will be published to a sub-dir in this dir.", + ) + + @staticmethod + def csperf(args: argparse.Namespace, run_type: Literal["cperf", "sperf"]) -> None: + _setup_common() + + publish_dir = Path(args.publish_dir) + if not publish_dir.is_dir(): + message = f"Input 'publish directory' is not a directory: {publish_dir}" + raise NotADirectoryError(message) + publish_subdir = ( + publish_dir / f"{run_type}_{datetime.now().strftime('%Y%m%d_%H%M%S')}" + ) + publish_subdir.mkdir() + + # Activate on demand benchmarks (C/SPerf are deactivated for + # 'standard' runs). + environ["ON_DEMAND_BENCHMARKS"] = "True" + commit_range = "upstream/main^!" 
+ + asv_command_str = ( + ASV_HARNESS.format(posargs=commit_range) + f" --bench={run_type}" + ) + + # Only do a single round. + asv_command = shlex.split(re.sub(r"rounds=\d", "rounds=1", asv_command_str)) + try: + _subprocess_runner([*asv_command, *args.asv_args], asv=True) + except subprocess.CalledProcessError as err: + # C/SPerf benchmarks are much bigger than the CI ones: + # Don't fail the whole run if memory blows on 1 benchmark. + # ASV produces return code of 2 if the run includes crashes. + if err.returncode != 2: + raise + + asv_command = shlex.split(f"publish {commit_range} --html-dir={publish_subdir}") + _subprocess_runner(asv_command, asv=True) + + # Print completion message. + location = BENCHMARKS_DIR / ".asv" + echo( + f'New ASV results for "{run_type}".\n' + f'See "{publish_subdir}",' + f'\n or JSON files under "{location / "results"}".' + ) + + +class CPerf(_CSPerf): + name = "cperf" + description = _CSPerf.description.format("CPerf") + epilog = _CSPerf.epilog.format("cperf") + + @staticmethod + def func(args: argparse.Namespace) -> None: + _CSPerf.csperf(args, "cperf") + + +class SPerf(_CSPerf): + name = "sperf" + description = _CSPerf.description.format("SPerf") + epilog = _CSPerf.epilog.format("sperf") + + @staticmethod + def func(args: argparse.Namespace) -> None: + _CSPerf.csperf(args, "sperf") + + +class Custom(_SubParserGenerator): + name = "custom" + description = ( + "Run ASV with the input **ASV sub-command**, without any preset " + "arguments - must all be supplied by the user. So just like running " + "ASV manually, with the convenience of re-using the runner's " + "scripted setup steps." + ) + epilog = "e.g. python bm_runner.py custom continuous a1b23d4 HEAD --quick" + + def add_arguments(self) -> None: + self.subparser.add_argument( + "asv_sub_command", + type=str, + help="The ASV command to run.", + ) + + @staticmethod + def func(args: argparse.Namespace) -> None: + _setup_common() + _subprocess_runner([args.asv_sub_command, *args.asv_args], asv=True) + + +class TrialRun(_SubParserGenerator): + name = "trialrun" + description = ( + "Fast trial-run a given benchmark, to check it works : " + "in a provided or latest-lockfile environment, " + "with no repeats for accuracy of measurement." + ) + epilog = ( + "e.g. python bm_runner.py trialrun " + "MyBenchmarks.time_calc ${DATA_GEN_PYTHON}" + "\n\nNOTE: 'runpath' also replaces $DATA_GEN_PYTHON during the run." + ) + + def add_arguments(self) -> None: + self.subparser.add_argument( + "benchmark", + type=str, + help=( + "A benchmark name, possibly including wildcards, " + "as supported by the ASV '--bench' argument." + ), + ) + self.subparser.add_argument( + "runpath", + type=str, + help=( + "A path to an existing python executable, " + "to completely bypass environment building." + ), + ) + + @staticmethod + def func(args: argparse.Namespace) -> None: + if args.runpath: + # Shortcut creation of a data-gen environment + # - which is also the trial-run env. 
+            python_path = Path(args.runpath).resolve()
+            environ["DATA_GEN_PYTHON"] = str(python_path)
+        _setup_common()
+        # Get the path of the data-gen environment, set up by the previous call.
+        python_path = Path(environ["DATA_GEN_PYTHON"])
+        # Allow 'on-demand' benchmarks.
+        environ["ON_DEMAND_BENCHMARKS"] = "1"
+        asv_command = [
+            "run",
+            "--bench",
+            args.benchmark,
+            # No repeats, for timing accuracy.
+            "--quick",
+            "--show-stderr",
+            # Do not build a unique env: run the test in the data-gen environment.
+            "--environment",
+            f"existing:{python_path}",
+        ] + args.asv_args
+        _subprocess_runner(asv_command, asv=True)
+
+
+class GhPost(_SubParserGenerator):
+    name = "_gh_post"
+    description = (
+        "Used by GitHub Actions to post benchmark reports that were prepared "
+        "during previous actions. Separated to comply with GitHub's security "
+        "requirements."
+    )
+    epilog = "Sole acceptable syntax: python bm_runner.py _gh_post"
+
+    @staticmethod
+    def func(args: argparse.Namespace) -> None:
+        _gh_post_reports()
+
+    # No arguments permitted for this subclass:
+
+    def add_arguments(self) -> None:
+        pass
+
+    def add_asv_arguments(self) -> None:
+        pass
+
+
+def main():
+    parser = argparse.ArgumentParser(
+        description="Run the Iris performance benchmarks (using Airspeed Velocity).",
+        epilog=(
+            "More help is available within each sub-command."
+            "\n\nNOTE(1): a separate python environment is created to "
+            "construct test files.\n  Set $DATA_GEN_PYTHON to avoid the cost "
+            "of this."
+            "\nNOTE(2): iris-test-data is downloaded and cached within the "
+            "data generation environment.\n  Set "
+            "$OVERRIDE_TEST_DATA_REPOSITORY to avoid the cost of this."
+            "\nNOTE(3): test data is cached within the "
+            "benchmarks code directory, and uses a lot of disk space "
+            "(GB).\n  Set $BENCHMARK_DATA to specify where this "
+            "space can be safely allocated."
+        ),
+        formatter_class=argparse.RawTextHelpFormatter,
+    )
+    subparsers = parser.add_subparsers(required=True)
+
+    for gen in (Overnight, Branch, CPerf, SPerf, Custom, TrialRun, GhPost):
+        _ = gen(subparsers).subparser
+
+    parsed = parser.parse_args()
+    parsed.func(parsed)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/benchmarks/custom_bms/README.md b/benchmarks/custom_bms/README.md
new file mode 100644
index 0000000000..eea85d74fe
--- /dev/null
+++ b/benchmarks/custom_bms/README.md
@@ -0,0 +1,11 @@
+# Iris custom benchmarks
+
+To be recognised by ASV, these benchmarks must be packaged and installed in
+line with the
+[ASV guidelines](https://asv.readthedocs.io/projects/asv-runner/en/latest/development/benchmark_plugins.html).
+This is achieved using the custom build in [install.py](./install.py).
+
+Installation is into the environment where the benchmarks are run (i.e. not
+the environment containing ASV + Nox, but the one built to the same
+specifications as the Tests environment). This is done via `build_command`
+in [asv.conf.json](../asv.conf.json).
diff --git a/benchmarks/custom_bms/install.py b/benchmarks/custom_bms/install.py
new file mode 100644
index 0000000000..59d27a0b43
--- /dev/null
+++ b/benchmarks/custom_bms/install.py
@@ -0,0 +1,55 @@
+# Copyright Iris contributors
+#
+# This file is part of Iris and is released under the BSD license.
+# See LICENSE in the root of the repository for full licensing details.
+"""Install Iris' custom benchmarks for detection by ASV.
+ +See the requirements for being detected as an ASV plugin: +https://asv.readthedocs.io/projects/asv-runner/en/latest/development/benchmark_plugins.html +""" + +from pathlib import Path +import shutil +from subprocess import run +from tempfile import TemporaryDirectory + +this_dir = Path(__file__).parent + + +def package_files(new_dir: Path) -> None: + """Package Iris' custom benchmarks for detection by ASV. + + Parameters + ---------- + new_dir : Path + The directory to package the custom benchmarks in. + """ + asv_bench_iris = new_dir / "asv_bench_iris" + benchmarks = asv_bench_iris / "benchmarks" + benchmarks.mkdir(parents=True) + (asv_bench_iris / "__init__.py").touch() + + for py_file in this_dir.glob("*.py"): + if py_file != Path(__file__): + shutil.copy2(py_file, benchmarks) + + # Create this on the fly, as having multiple pyproject.toml files in 1 + # project causes problems. + py_project = new_dir / "pyproject.toml" + py_project.write_text( + """ + [project] + name = "asv_bench_iris" + version = "0.1" + """ + ) + + +def main(): + with TemporaryDirectory() as temp_dir: + package_files(Path(temp_dir)) + run(["python", "-m", "pip", "install", temp_dir]) + + +if __name__ == "__main__": + main() diff --git a/benchmarks/custom_bms/tracemallocbench.py b/benchmarks/custom_bms/tracemallocbench.py new file mode 100644 index 0000000000..486c67aeb9 --- /dev/null +++ b/benchmarks/custom_bms/tracemallocbench.py @@ -0,0 +1,196 @@ +# Copyright Iris contributors +# +# This file is part of Iris and is released under the BSD license. +# See LICENSE in the root of the repository for full licensing details. + +"""Benchmark for growth in process resident memory, repeating for accuracy. + +Uses a modified version of the repeat logic in +:class:`asv_runner.benchmarks.time.TimeBenchmark`. +""" + +import re +from timeit import Timer +import tracemalloc +from typing import Callable + +from asv_runner.benchmarks.time import TimeBenchmark, wall_timer + + +class TracemallocBenchmark(TimeBenchmark): + """Benchmark for growth in process resident memory, repeating for accuracy. + + Obviously limited as to what it actually measures : Relies on the current + process not having significant unused (de-allocated) memory when the + tested codeblock runs, and only reliable when the code allocates a + significant amount of new memory. + + Benchmark operations prefixed with ``tracemalloc_`` or ``Tracemalloc`` will + use this benchmark class. + + Inherits behaviour from :class:`asv_runner.benchmarks.time.TimeBenchmark`, + with modifications for memory measurement. See the below Attributes section + and https://asv.readthedocs.io/en/stable/writing_benchmarks.html#timing-benchmarks. + + Attributes + ---------- + Mostly identical to :class:`asv_runner.benchmarks.time.TimeBenchmark`. See + https://asv.readthedocs.io/en/stable/benchmarks.html#timing-benchmarks + Make sure to use the inherited ``repeat`` attribute if greater accuracy + is needed. Below are the attributes where inherited behaviour is + overridden. + + number : int + The number of times the benchmarked operation will be called per + ``repeat``. Memory growth is measured after ALL calls - + i.e. `number` should make no difference to the result if the operation + has perfect garbage collection. The parent class's intelligent + modification of `number` is NOT inherited. A minimum value of ``1`` is + enforced. + warmup_time, sample_time, min_run_count, timer + Not used. + type : str = "tracemalloc" + The name of this benchmark type. 
+ unit : str = "bytes" + The units of the measured metric (i.e. the growth in memory). + + """ + + name_regex = re.compile("^(Tracemalloc[A-Z_].+)|(tracemalloc_.+)$") + + param: tuple + + def __init__(self, name: str, func: Callable, attr_sources: list) -> None: + """Initialize a new instance of `TracemallocBenchmark`. + + Parameters + ---------- + name : str + The name of the benchmark. + func : callable + The function to benchmark. + attr_sources : list + A list of objects from which to draw attributes. + """ + super().__init__(name, func, attr_sources) + self.type = "tracemalloc" + self.unit = "bytes" + + def _load_vars(self): + """Load benchmark variables from attribute sources. + + Downstream handling of ``number`` is not the same as in the parent, so + need to make sure it is at least 1. + """ + super()._load_vars() + self.number = max(1, self.number) + + def run(self, *param: tuple) -> dict: + """Run the benchmark with the given parameters. + + Downstream handling of ``param`` is not the same as in the parent, so + need to store it now. + + Parameters + ---------- + *param : tuple + The parameters to pass to the benchmark function. + + Returns + ------- + dict + A dictionary with the benchmark results. It contains the samples + taken, and "the number of times the function was called in each + sample" - for this benchmark that is always ``1`` to avoid the + parent class incorrectly modifying the results. + """ + self.param = param + return super().run(*param) + + def benchmark_timing( + self, + timer: Timer, + min_repeat: int, + max_repeat: int, + max_time: float, + warmup_time: float, + number: int, + min_run_count: int, + ) -> tuple[list[int], int]: + """Benchmark the timing of the function execution. + + Heavily modified from the parent method + - Directly performs setup and measurement (parent used timeit). + - `number` used differently (see Parameters). + - No warmup phase. + + Parameters + ---------- + timer : timeit.Timer + Not used. + min_repeat : int + The minimum number of times to repeat the function execution. + max_repeat : int + The maximum number of times to repeat the function execution. + max_time : float + The maximum total time to spend on the benchmarking. + warmup_time : float + Not used. + number : int + The number of times the benchmarked operation will be called per + repeat. Memory growth is measured after ALL calls - i.e. `number` + should make no difference to the result if the operation + has perfect garbage collection. The parent class's intelligent + modification of `number` is NOT inherited. + min_run_count : int + Not used. + + Returns + ------- + list + A list of the measured memory growths, in bytes. + int = 1 + Part of the inherited return signature. Must be 1 to avoid + the parent incorrectly modifying the results. + """ + start_time = wall_timer() + samples: list[int] = [] + + def too_slow(num_samples) -> bool: + """Stop taking samples if limits exceeded. + + Parameters + ---------- + num_samples : int + The number of samples taken so far. + + Returns + ------- + bool + True if the benchmark should stop, False otherwise. 
+ """ + if num_samples < min_repeat: + return False + return wall_timer() > start_time + max_time + + # Collect samples + while len(samples) < max_repeat: + self.redo_setup() + tracemalloc.start() + for _ in range(number): + __ = self.func(*self.param) + _, peak_mem_bytes = tracemalloc.get_traced_memory() + tracemalloc.stop() + + samples.append(peak_mem_bytes) + + if too_slow(len(samples)): + break + + # ``number`` is not used in the same way as in the parent class. Must + # be returned as 1 to avoid parent incorrectly modifying the results. + return samples, 1 + + +# https://asv.readthedocs.io/projects/asv-runner/en/latest/development/benchmark_plugins.html +export_as_benchmark = [TracemallocBenchmark] diff --git a/codecov.yml b/codecov.yml new file mode 100644 index 0000000000..a0efbb9997 --- /dev/null +++ b/codecov.yml @@ -0,0 +1,9 @@ +coverage: + # see https://docs.codecov.com/docs/commit-status + status: + project: + default: + target: auto + # coverage can drop by up to % while still posting success + threshold: 3% + patch: off diff --git a/docs/Makefile b/docs/Makefile new file mode 100644 index 0000000000..b6f52f58f9 --- /dev/null +++ b/docs/Makefile @@ -0,0 +1,51 @@ +SUBDIRS = src + +help: + @for i in $(SUBDIRS); do \ + echo "make help in $$i..."; \ + (cd $$i; $(MAKE) $(MFLAGS) $(MYMAKEFLAGS) help); done + +html: + @for i in $(SUBDIRS); do \ + echo "make html in $$i..."; \ + (cd $$i; $(MAKE) $(MFLAGS) $(MYMAKEFLAGS) html); done + +html-noplot: + @for i in $(SUBDIRS); do \ + echo "make html-noplot in $$i..."; \ + (cd $$i; $(MAKE) $(MFLAGS) $(MYMAKEFLAGS) html-noplot); done + +html-noapi: + @for i in $(SUBDIRS); do \ + echo "make html-noapi in $$i..."; \ + (cd $$i; $(MAKE) $(MFLAGS) $(MYMAKEFLAGS) html-noapi); done + +html-quick: + @for i in $(SUBDIRS); do \ + echo "make html-quick in $$i..."; \ + (cd $$i; $(MAKE) $(MFLAGS) $(MYMAKEFLAGS) html-quick); done + +clean: + @for i in $(SUBDIRS); do \ + echo "Clearing in $$i..."; \ + (cd $$i; $(MAKE) $(MFLAGS) $(MYMAKEFLAGS) clean); done + +doctest: + @for i in $(SUBDIRS); do \ + echo "Running doctest in $$i..."; \ + (cd $$i; $(MAKE) $(MFLAGS) $(MYMAKEFLAGS) doctest); done + +linkcheck: + @for i in $(SUBDIRS); do \ + echo "Running linkcheck in $$i..."; \ + (cd $$i; $(MAKE) $(MFLAGS) $(MYMAKEFLAGS) linkcheck); done + +show: + @for i in $(SUBDIRS); do \ + echo "Running show in $$i..."; \ + (cd $$i; $(MAKE) $(MFLAGS) $(MYMAKEFLAGS) show); done + +livehtml: + @for i in $(SUBDIRS); do \ + echo "Running show in $$i..."; \ + (cd $$i; $(MAKE) $(MFLAGS) $(MYMAKEFLAGS) livehtml); done \ No newline at end of file diff --git a/docs/gallery_code/README.rst b/docs/gallery_code/README.rst new file mode 100644 index 0000000000..85bf0552b4 --- /dev/null +++ b/docs/gallery_code/README.rst @@ -0,0 +1,28 @@ +.. _gallery_index: + +Gallery +======= + +The gallery is divided into sections as described below. All entries +show the code used to produce the example plot. Additionally there are links +to download the code directly as source or as part of a +`jupyter notebook `_, +these links are at the bottom of the page. + +In order to successfully view the jupyter notebook locally so you may +experiment with the code you will need an environment setup with the +appropriate dependencies, see :ref:`installing_iris` for instructions. +Ensure that ``iris-sample-data`` is installed as it is used in the gallery. +Additionally ensure that you install ``jupyter``. 
The command to install both +is:: + + conda install -c conda-forge iris-sample-data jupyter + +Once you have downloaded the notebooks (bottom of each gallery page), +you may start the jupyter notebook via:: + + jupyter notebook + +If you wish to contribute to the gallery see the +:ref:`contributing.documentation.gallery` section of the +:ref:`contributing.documentation_full`. diff --git a/docs/gallery_code/general/README.rst b/docs/gallery_code/general/README.rst new file mode 100644 index 0000000000..3a48e7cd8e --- /dev/null +++ b/docs/gallery_code/general/README.rst @@ -0,0 +1,3 @@ +General +------- + diff --git a/docs/gallery_code/general/plot_SOI_filtering.py b/docs/gallery_code/general/plot_SOI_filtering.py new file mode 100644 index 0000000000..4b256c894c --- /dev/null +++ b/docs/gallery_code/general/plot_SOI_filtering.py @@ -0,0 +1,108 @@ +""" +Applying a Filter to a Time-Series +================================== + +This example demonstrates low pass filtering a time-series by applying a +weighted running mean over the time dimension. + +The time-series used is the Darwin-only Southern Oscillation index (SOI), +which is filtered using two different Lanczos filters, one to filter out +time-scales of less than two years and one to filter out time-scales of +less than 7 years. + +References +---------- + Duchon C. E. (1979) Lanczos Filtering in One and Two Dimensions. + Journal of Applied Meteorology, Vol 18, pp 1016-1022. + + Trenberth K. E. (1984) Signal Versus Noise in the Southern Oscillation. + Monthly Weather Review, Vol 112, pp 326-332 + +""" # noqa: D205, D212, D400 + +import matplotlib.pyplot as plt +import numpy as np + +import iris +import iris.plot as iplt + + +def low_pass_weights(window, cutoff): + """Calculate weights for a low pass Lanczos filter. + + Parameters + ---------- + window : int + The length of the filter window. + cutoff : float + The cutoff frequency in inverse time steps. + + """ + order = ((window - 1) // 2) + 1 + nwts = 2 * order + 1 + w = np.zeros([nwts]) + n = nwts // 2 + w[n] = 2 * cutoff + k = np.arange(1.0, n) + sigma = np.sin(np.pi * k / n) * n / (np.pi * k) + firstfactor = np.sin(2.0 * np.pi * cutoff * k) / (np.pi * k) + w[n - 1 : 0 : -1] = firstfactor * sigma + w[n + 1 : -1] = firstfactor * sigma + return w[1:-1] + + +def main(): + # Load the monthly-valued Southern Oscillation Index (SOI) time-series. + fname = iris.sample_data_path("SOI_Darwin.nc") + soi = iris.load_cube(fname) + + # Window length for filters. + window = 121 + + # Construct 2-year (24-month) and 7-year (84-month) low pass filters + # for the SOI data which is monthly. + wgts24 = low_pass_weights(window, 1.0 / 24.0) + wgts84 = low_pass_weights(window, 1.0 / 84.0) + + # Apply each filter using the rolling_window method used with the weights + # keyword argument. A weighted sum is required because the magnitude of + # the weights are just as important as their relative sizes. + soi24 = soi.rolling_window("time", iris.analysis.SUM, len(wgts24), weights=wgts24) + soi84 = soi.rolling_window("time", iris.analysis.SUM, len(wgts84), weights=wgts84) + + # Plot the SOI time series and both filtered versions. 
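+    # (Note: each filtered series is shorter than the original, because
+    # rolling_window only keeps positions where the full window fits.)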
+ plt.figure(figsize=(9, 4)) + iplt.plot( + soi, + color="0.7", + linewidth=1.0, + linestyle="-", + alpha=1.0, + label="no filter", + ) + iplt.plot( + soi24, + color="b", + linewidth=2.0, + linestyle="-", + alpha=0.7, + label="2-year filter", + ) + iplt.plot( + soi84, + color="r", + linewidth=2.0, + linestyle="-", + alpha=0.7, + label="7-year filter", + ) + plt.ylim([-4, 4]) + plt.title("Southern Oscillation Index (Darwin Only)") + plt.xlabel("Time") + plt.ylabel("SOI") + plt.legend(fontsize=10) + iplt.show() + + +if __name__ == "__main__": + main() diff --git a/docs/gallery_code/general/plot_anomaly_log_colouring.py b/docs/gallery_code/general/plot_anomaly_log_colouring.py new file mode 100644 index 0000000000..cd11161041 --- /dev/null +++ b/docs/gallery_code/general/plot_anomaly_log_colouring.py @@ -0,0 +1,111 @@ +""" +Colouring Anomaly Data With Logarithmic Scaling +=============================================== + +In this example, we need to plot anomaly data where the values have a +"logarithmic" significance -- i.e. we want to give approximately equal ranges +of colour between data values of, say, 1 and 10 as between 10 and 100. + +As the data range also contains zero, that obviously does not suit a simple +logarithmic interpretation. However, values of less than a certain absolute +magnitude may be considered "not significant", so we put these into a separate +"zero band" which is plotted in white. + +To do this, we create a custom value mapping function (normalization) using +the matplotlib Norm class :obj:`matplotlib.colors.SymLogNorm`. +We use this to make a cell-filled pseudocolor plot with a colorbar. + +.. note:: + + By "pseudocolour", we mean that each data point is drawn as a "cell" + region on the plot, coloured according to its data value. + This is provided in Iris by the functions :meth:`iris.plot.pcolor` and + :meth:`iris.plot.pcolormesh`, which call the underlying matplotlib + functions of the same names (i.e., :obj:`matplotlib.pyplot.pcolor` + and :obj:`matplotlib.pyplot.pcolormesh`). + See also: https://en.wikipedia.org/wiki/False_color#Pseudocolor. + +""" # noqa: D205, D212, D400 + +import cartopy.crs as ccrs +import matplotlib.colors as mcols +import matplotlib.pyplot as plt + +import iris +import iris.coord_categorisation +import iris.plot as iplt + + +def main(): + # Load a sample air temperatures sequence. + file_path = iris.sample_data_path("E1_north_america.nc") + temperatures = iris.load_cube(file_path) + + # Create a year-number coordinate from the time information. + iris.coord_categorisation.add_year(temperatures, "time") + + # Create a sample anomaly field for one chosen year, by extracting that + # year and subtracting the time mean. + sample_year = 1982 + year_temperature = temperatures.extract(iris.Constraint(year=sample_year)) + time_mean = temperatures.collapsed("time", iris.analysis.MEAN) + anomaly = year_temperature - time_mean + + # Construct a plot title string explaining which years are involved. + years = temperatures.coord("year").points + plot_title = "Temperature anomaly" + plot_title += "\n{} differences from {}-{} average.".format( + sample_year, years[0], years[-1] + ) + + # Define scaling levels for the logarithmic colouring. + minimum_log_level = 0.1 + maximum_scale_level = 3.0 + + # Use a standard colour map which varies blue-white-red. 
+ # For suitable options, see the 'Diverging colormaps' section in: + # https://matplotlib.org/stable/gallery/color/colormap_reference.html + anom_cmap = "bwr" + + # Create a 'logarithmic' data normalization. + anom_norm = mcols.SymLogNorm( + linthresh=minimum_log_level, + linscale=0.01, + vmin=-maximum_scale_level, + vmax=maximum_scale_level, + ) + # Setting "linthresh=minimum_log_level" makes its non-logarithmic + # data range equal to our 'zero band'. + # Setting "linscale=0.01" maps the whole zero band to the middle colour value + # (i.e., 0.5), which is the neutral point of a "diverging" style colormap. + + # Create an Axes, specifying the map projection. + plt.axes(projection=ccrs.LambertConformal()) + + # Make a pseudocolour plot using this colour scheme. + mesh = iplt.pcolormesh(anomaly, cmap=anom_cmap, norm=anom_norm) + + # Add a colourbar, with extensions to show handling of out-of-range values. + bar = plt.colorbar(mesh, orientation="horizontal", extend="both") + + # Set some suitable fixed "logarithmic" colourbar tick positions. + tick_levels = [-3, -1, -0.3, 0.0, 0.3, 1, 3] + bar.set_ticks(tick_levels) + + # Modify the tick labels so that the centre one shows "+/-". + tick_levels[3] = r"$\pm${:g}".format(minimum_log_level) + bar.set_ticklabels(tick_levels) + + # Label the colourbar to show the units. + bar.set_label("[{}, log scale]".format(anomaly.units)) + + # Add coastlines and a title. + plt.gca().coastlines() + plt.title(plot_title) + + # Display the result. + iplt.show() + + +if __name__ == "__main__": + main() diff --git a/docs/gallery_code/general/plot_coriolis.py b/docs/gallery_code/general/plot_coriolis.py new file mode 100644 index 0000000000..905108abfd --- /dev/null +++ b/docs/gallery_code/general/plot_coriolis.py @@ -0,0 +1,84 @@ +""" +Deriving the Coriolis Frequency Over the Globe +============================================== + +This code computes the Coriolis frequency and stores it in a cube with +associated metadata. It then plots the Coriolis frequency on an orthographic +projection. + +""" # noqa: D205, D212, D400 + +import cartopy.crs as ccrs +import matplotlib.pyplot as plt +import numpy as np + +import iris +from iris.coord_systems import GeogCS +import iris.plot as iplt + + +def main(): + # Start with arrays for latitudes and longitudes, with a given number of + # coordinates in the arrays. + coordinate_points = 200 + longitudes = np.linspace(-180.0, 180.0, coordinate_points) + latitudes = np.linspace(-90.0, 90.0, coordinate_points) + lon2d, lat2d = np.meshgrid(longitudes, latitudes) + + # Omega is the Earth's rotation rate, expressed in radians per second + omega = 7.29e-5 + + # The data for our cube is the Coriolis frequency, + # `f = 2 * omega * sin(phi)`, which is computed for each grid point over + # the globe from the 2-dimensional latitude array. + data = 2.0 * omega * np.sin(np.deg2rad(lat2d)) + + # We now need to define a coordinate system for the plot. + # Here we'll use GeogCS; 6371229 is the radius of the Earth in metres. + cs = GeogCS(6371229) + + # The Iris coords module turns the latitude list into a coordinate array. + # Coords then applies an appropriate standard name and unit to it. + lat_coord = iris.coords.DimCoord( + latitudes, standard_name="latitude", units="degrees", coord_system=cs + ) + + # The above process is repeated for the longitude coordinates. 
+ lon_coord = iris.coords.DimCoord( + longitudes, standard_name="longitude", units="degrees", coord_system=cs + ) + + # Now we add bounds to our latitude and longitude coordinates. + # We want simple, contiguous bounds for our regularly-spaced coordinate + # points so we use the guess_bounds() method of the coordinate. For more + # complex coordinates, we could derive and set the bounds manually. + lat_coord.guess_bounds() + lon_coord.guess_bounds() + + # Now we input our data array into the cube. + new_cube = iris.cube.Cube( + data, + standard_name="coriolis_parameter", + units="s-1", + dim_coords_and_dims=[(lat_coord, 0), (lon_coord, 1)], + ) + + # Now let's plot our cube, along with coastlines, a title and an + # appropriately-labelled colour bar: + ax = plt.axes(projection=ccrs.Orthographic()) + ax.coastlines(resolution="10m") + mesh = iplt.pcolormesh(new_cube, cmap="seismic") + tick_levels = [-0.00012, -0.00006, 0.0, 0.00006, 0.00012] + plt.colorbar( + mesh, + orientation="horizontal", + label="s-1", + ticks=tick_levels, + format="%.1e", + ) + plt.title("Coriolis frequency") + plt.show() + + +if __name__ == "__main__": + main() diff --git a/docs/gallery_code/general/plot_cross_section.py b/docs/gallery_code/general/plot_cross_section.py new file mode 100644 index 0000000000..8e5bee85ed --- /dev/null +++ b/docs/gallery_code/general/plot_cross_section.py @@ -0,0 +1,42 @@ +""" +Cross Section Plots +=================== + +This example demonstrates contour plots of a cross-sectioned multi-dimensional +cube which features a hybrid height vertical coordinate system. + +""" # noqa: D205, D212, D400 + +import matplotlib.pyplot as plt + +import iris +import iris.plot as iplt +import iris.quickplot as qplt + + +def main(): + # Load some test data. + fname = iris.sample_data_path("hybrid_height.nc") + theta = iris.load_cube(fname, "air_potential_temperature") + + # Extract a single height vs longitude cross-section. N.B. This could + # easily be changed to extract a specific slice, or even to loop over *all* + # cross section slices. + cross_section = next(theta.slices(["grid_longitude", "model_level_number"])) + + qplt.contourf(cross_section, coords=["grid_longitude", "altitude"], cmap="RdBu_r") + iplt.show() + + # Now do the equivalent plot, only against model level + plt.figure() + + qplt.contourf( + cross_section, + coords=["grid_longitude", "model_level_number"], + cmap="RdBu_r", + ) + iplt.show() + + +if __name__ == "__main__": + main() diff --git a/docs/gallery_code/general/plot_custom_aggregation.py b/docs/gallery_code/general/plot_custom_aggregation.py new file mode 100644 index 0000000000..65fadfb473 --- /dev/null +++ b/docs/gallery_code/general/plot_custom_aggregation.py @@ -0,0 +1,96 @@ +""" +Calculating a Custom Statistic +============================== + +This example shows how to define and use a custom +:class:`iris.analysis.Aggregator`, that provides a new statistical operator for +use with cube aggregation functions such as :meth:`~iris.cube.Cube.collapsed`, +:meth:`~iris.cube.Cube.aggregated_by` or +:meth:`~iris.cube.Cube.rolling_window`. + +In this case, we have a 240-year sequence of yearly average surface temperature +over North America, and we want to calculate in how many years these exceed a +certain temperature over a spell of 5 years or more. 
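+
+The key point is that :class:`iris.analysis.Aggregator` only needs a plain
+function that reduces a NumPy array over a given axis; once wrapped, the new
+statistic can be used anywhere a built-in one such as
+:obj:`iris.analysis.MEAN` can.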
+ +""" # noqa: D205, D212, D400 + +import matplotlib.pyplot as plt +import numpy as np + +import iris +from iris.analysis import Aggregator +import iris.plot as iplt +import iris.quickplot as qplt +from iris.util import rolling_window + + +# Define a function to perform the custom statistical operation. +# Note: in order to meet the requirements of iris.analysis.Aggregator, it must +# do the calculation over an arbitrary (given) data axis. +def count_spells(data, threshold, axis, spell_length): + """Calculate the number of points in a sequence. + + Function to calculate the number of points in a sequence where the value + has exceeded a threshold value for at least a certain number of timepoints. + + Generalised to operate on multiple time sequences arranged on a specific + axis of a multidimensional array. + + Parameters + ---------- + data : array + Raw data to be compared with value threshold. + threshold : float + Threshold point for 'significant' datapoints. + axis : int + Number of the array dimension mapping the time sequences. + (Can also be negative, e.g. '-1' means last dimension). + spell_length : int + Number of consecutive times at which value > threshold to "count". + + """ + if axis < 0: + # just cope with negative axis numbers + axis += data.ndim + # Threshold the data to find the 'significant' points. + data_hits = data > threshold + # Make an array with data values "windowed" along the time axis. + hit_windows = rolling_window(data_hits, window=spell_length, axis=axis) + # Find the windows "full of True-s" (along the added 'window axis'). + full_windows = np.all(hit_windows, axis=axis + 1) + # Count points fulfilling the condition (along the time axis). + spell_point_counts = np.sum(full_windows, axis=axis, dtype=int) + return spell_point_counts + + +def main(): + # Load the whole time-sequence as a single cube. + file_path = iris.sample_data_path("E1_north_america.nc") + cube = iris.load_cube(file_path) + + # Make an aggregator from the user function. + SPELL_COUNT = Aggregator( + "spell_count", count_spells, units_func=lambda units, **kwargs: 1 + ) + + # Define the parameters of the test. + threshold_temperature = 280.0 + spell_years = 5 + + # Calculate the statistic. + warm_periods = cube.collapsed( + "time", + SPELL_COUNT, + threshold=threshold_temperature, + spell_length=spell_years, + ) + warm_periods.rename("Number of 5-year warm spells in 240 years") + + # Plot the results. + qplt.contourf(warm_periods, cmap="RdYlBu_r") + plt.gca().coastlines() + iplt.show() + + +if __name__ == "__main__": + main() diff --git a/docs/gallery_code/general/plot_custom_file_loading.py b/docs/gallery_code/general/plot_custom_file_loading.py new file mode 100644 index 0000000000..06de887614 --- /dev/null +++ b/docs/gallery_code/general/plot_custom_file_loading.py @@ -0,0 +1,339 @@ +""" +Loading a Cube From a Custom File Format +======================================== + +This example shows how a custom text file can be loaded using the standard Iris +load mechanism. + +The first stage in the process is to define an Iris :class:`FormatSpecification +` for the file format. To create a +format specification we need to define the following: + +* **format_name** - Some text that describes the format specification we are + creating +* **file_element** - FileElement object describing the element which identifies + this FormatSpecification. + + Possible values are: + + * ``iris.io.format_picker.MagicNumber(n, o)`` + The n bytes from the file at offset o. 
+
+  * ``iris.io.format_picker.FileExtension()``
+      The file extension.
+
+  * ``iris.io.format_picker.LeadingLine()``
+      The first line of the file.
+
+* **file_element_value** - The value that the file_element should take if a file
+  matches this FormatSpecification
+* **handler** (optional) - A generator function that will be called when the
+  file specification has been identified. This function is provided by the
+  user and provides the means to parse the whole file. If no handler function
+  is provided, then identification is still possible without any handling.
+
+  The handler function must define the following arguments:
+
+  * list of filenames to process
+  * callback function - An optional function to filter/alter the Iris cubes
+    returned
+
+  The handler function must be defined as a generator which yields each cube
+  as it is produced.
+
+* **priority** (optional) - Integer giving a priority for considering this
+  specification, where a higher priority means sooner consideration
+
+In the following example, the function :func:`load_NAME_III` has been defined
+to handle the loading of the raw data from the custom file format. This
+function is called from :func:`NAME_to_cube` which uses this data to create and
+yield Iris cubes.
+
+In the ``main()`` function the filenames are loaded via the ``iris.load_cube``
+function which automatically invokes the ``FormatSpecification`` we defined.
+The cube returned from the load function is then used to produce a plot.
+
+"""  # noqa: D205, D212, D400
+
+import datetime
+
+from cf_units import CALENDAR_STANDARD, Unit
+import matplotlib.pyplot as plt
+import numpy as np
+
+import iris
+import iris.coord_systems as icoord_systems
+import iris.coords as icoords
+import iris.fileformats
+import iris.io.format_picker as format_picker
+import iris.plot as iplt
+
+UTC_format = "%H%M%Z %d/%m/%Y"
+
+FLOAT_HEADERS = [
+    "X grid origin",
+    "Y grid origin",
+    "X grid resolution",
+    "Y grid resolution",
+]
+INT_HEADERS = ["X grid size", "Y grid size", "Number of fields"]
+DATE_HEADERS = ["Run time", "Start of release", "End of release"]
+COLUMN_NAMES = [
+    "species_category",
+    "species",
+    "cell_measure",
+    "quantity",
+    "unit",
+    "z_level",
+    "time",
+]
+
+
+def load_NAME_III(filename):
+    """Load the Met Office's NAME III grid output files.
+
+    Loads the Met Office's NAME III grid output files, returning headers,
+    column definitions and data arrays as 3 separate lists.
+
+    """
+    # Loading a file gives a generator of lines which can be progressed using
+    # the next() function. This will come in handy as we wish to progress
+    # through the file line by line.
+    with open(filename) as file_handle:
+        # Define a dictionary which can hold the header metadata for this file.
+        headers = {}
+
+        # Skip the NAME header of the file which looks something like
+        # 'NAME III (version X.X.X)'.
+        next(file_handle)
+
+        # Read the next 16 lines of header information, putting the form
+        # "header name: header value" into a dictionary.
+        for _ in range(16):
+            header_name, header_value = next(file_handle).split(":")
+
+            # Strip off any spurious space characters in the header name and
+            # value.
+            header_name = header_name.strip()
+            header_value = header_value.strip()
+
+            # Cast some headers into floats or integers if they match a given
+            # header name.
+            if header_name in FLOAT_HEADERS:
+                header_value = float(header_value)
+            elif header_name in INT_HEADERS:
+                header_value = int(header_value)
+            elif header_name in DATE_HEADERS:
+                # convert the time to python datetimes
+                header_value = datetime.datetime.strptime(header_value, UTC_format)
+
+            headers[header_name] = header_value
+
+        # Skip the next blank line in the file.
+        next(file_handle)
+
+        # Read the next 7 lines of column definitions.
+        column_headings = {}
+        for column_header_name in COLUMN_NAMES:
+            column_headings[column_header_name] = [
+                col.strip() for col in next(file_handle).split(",")
+            ][:-1]
+
+        # Convert the time to python datetimes.
+        new_time_column_header = []
+        for i, t in enumerate(column_headings["time"]):
+            # The first 4 columns aren't time at all, so don't convert them to
+            # datetimes.
+            if i >= 4:
+                t = datetime.datetime.strptime(t, UTC_format)
+            new_time_column_header.append(t)
+        column_headings["time"] = new_time_column_header
+
+        # Skip the blank line after the column headers.
+        next(file_handle)
+
+        # Make a list of data arrays to hold the data for each column.
+        data_shape = (headers["Y grid size"], headers["X grid size"])
+        data_arrays = [
+            np.zeros(data_shape, dtype=np.float32)
+            for i in range(headers["Number of fields"])
+        ]
+
+        # Iterate over the remaining lines which represent the data in a column
+        # form.
+        for line in file_handle:
+            # Split the line by comma, removing the last empty column caused by
+            # the trailing comma.
+            vals = line.split(",")[:-1]
+
+            # Cast the x and y grid positions to floats and convert them to
+            # zero based indices (the numbers are 1 based grid positions where
+            # 0.5 represents half a grid point.)
+            x = int(float(vals[0]) - 1.5)
+            y = int(float(vals[1]) - 1.5)
+
+            # Populate the data arrays (i.e. all columns but the leading 4).
+            for i, data_array in enumerate(data_arrays):
+                data_array[y, x] = float(vals[i + 4])
+
+    return headers, column_headings, data_arrays
+
+
+def NAME_to_cube(filenames, callback):
+    """Return a generator of cubes given a list of filenames and a callback."""
+    for filename in filenames:
+        header, column_headings, data_arrays = load_NAME_III(filename)
+
+        for i, data_array in enumerate(data_arrays):
+            # turn the dictionary of column headers with a list of header
+            # information for each field into a dictionary of headers for just
+            # this field. Ignore the first 4 columns of grid position (data was
+            # located with the data array).
+            field_headings = dict((k, v[i + 4]) for k, v in column_headings.items())
+
+            # make a cube
+            cube = iris.cube.Cube(data_array)
+
+            # define the name and unit
+            name = "%s %s" % (
+                field_headings["species"],
+                field_headings["quantity"],
+            )
+            name = name.upper().replace(" ", "_")
+            cube.rename(name)
+            # Some units are badly encoded in the file, fix this by putting a
+            # space in between. (if gs is not found, then the string will be
+            # returned unchanged)
+            cube.units = field_headings["unit"].replace("gs", "g s")
+
+            # define and add the singular coordinates of the field (flight
+            # level, time etc.)
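+            # (The flight level value is really a label rather than a measured
+            # quantity, hence the dimensionless unit "1" below.)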
+ cube.add_aux_coord( + icoords.AuxCoord( + field_headings["z_level"], + long_name="flight_level", + units="1", + ) + ) + + # define the time unit and use it to serialise the datetime for the + # time coordinate + time_unit = Unit("hours since epoch", calendar=CALENDAR_STANDARD) + time_coord = icoords.AuxCoord( + time_unit.date2num(field_headings["time"]), + standard_name="time", + units=time_unit, + ) + cube.add_aux_coord(time_coord) + + # build a coordinate system which can be referenced by latitude and + # longitude coordinates + lat_lon_coord_system = icoord_systems.GeogCS(6371229) + + # build regular latitude and longitude coordinates which have + # bounds + start = header["X grid origin"] + header["X grid resolution"] + step = header["X grid resolution"] + count = header["X grid size"] + pts = start + np.arange(count, dtype=np.float32) * step + lon_coord = icoords.DimCoord( + pts, + standard_name="longitude", + units="degrees", + coord_system=lat_lon_coord_system, + ) + lon_coord.guess_bounds() + + start = header["Y grid origin"] + header["Y grid resolution"] + step = header["Y grid resolution"] + count = header["Y grid size"] + pts = start + np.arange(count, dtype=np.float32) * step + lat_coord = icoords.DimCoord( + pts, + standard_name="latitude", + units="degrees", + coord_system=lat_lon_coord_system, + ) + lat_coord.guess_bounds() + + # add the latitude and longitude coordinates to the cube, with + # mappings to data dimensions + cube.add_dim_coord(lat_coord, 0) + cube.add_dim_coord(lon_coord, 1) + + # implement standard iris callback capability. Although callbacks + # are not used in this example, the standard mechanism for a custom + # loader to implement a callback is shown: + cube = iris.io.run_callback( + callback, cube, [header, field_headings, data_array], filename + ) + + # yield the cube created (the loop will continue when the next() + # element is requested) + yield cube + + +# Create a format_picker specification of the NAME file format giving it a +# priority greater than the built in NAME loader. 
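+# (A higher priority simply means the format agent considers this
+# specification sooner when identifying a file.)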
+_NAME_III_spec = format_picker.FormatSpecification( + "Name III", + format_picker.LeadingLine(), + lambda line: line.startswith(b"NAME III"), + NAME_to_cube, + priority=6, +) + +# Register the NAME loader with iris +iris.fileformats.FORMAT_AGENT.add_spec(_NAME_III_spec) + + +# --------------------------------------------- +# | Using the new loader | +# --------------------------------------------- + + +def main(): + fname = iris.sample_data_path("NAME_output.txt") + + boundary_volc_ash_constraint = iris.Constraint( + "VOLCANIC_ASH_AIR_CONCENTRATION", flight_level="From FL000 - FL200" + ) + + # Callback shown as None to illustrate where a cube-level callback function + # would be used if required + cube = iris.load_cube(fname, boundary_volc_ash_constraint, callback=None) + + # draw contour levels for the data (the top level is just a catch-all) + levels = (0.0002, 0.002, 0.004, 1e10) + cs = iplt.contourf( + cube, + levels=levels, + colors=("#80ffff", "#939598", "#e00404"), + ) + + # draw a black outline at the lowest contour to highlight affected areas + iplt.contour(cube, levels=(levels[0], 100), colors="black") + + # set an extent and a background image for the map + ax = plt.gca() + ax.set_extent((-90, 20, 20, 75)) + ax.stock_img("ne_shaded") + + # make a legend, with custom labels, for the coloured contour set + artists, _ = cs.legend_elements() + labels = [ + r"$%s < x \leq %s$" % (levels[0], levels[1]), + r"$%s < x \leq %s$" % (levels[1], levels[2]), + r"$x > %s$" % levels[2], + ] + ax.legend(artists, labels, title="Ash concentration / g m-3", loc="upper left") + + time = cube.coord("time") + time_date = time.units.num2date(time.points[0]).strftime(UTC_format) + plt.title("Volcanic ash concentration forecast\nvalid at %s" % time_date) + + iplt.show() + + +if __name__ == "__main__": + main() diff --git a/docs/gallery_code/general/plot_global_map.py b/docs/gallery_code/general/plot_global_map.py new file mode 100644 index 0000000000..60ac200a43 --- /dev/null +++ b/docs/gallery_code/general/plot_global_map.py @@ -0,0 +1,37 @@ +""" +Quickplot of a 2D Cube on a Map +=============================== + +This example demonstrates a contour plot of global air temperature. The plot +title and the labels for the axes are automatically derived from the metadata. + +""" # noqa: D205, D212, D400 + +import cartopy.crs as ccrs +import matplotlib.pyplot as plt + +import iris +import iris.plot as iplt +import iris.quickplot as qplt + + +def main(): + fname = iris.sample_data_path("air_temp.pp") + temperature = iris.load_cube(fname) + + # Plot #1: contourf with axes longitude from -180 to 180 + plt.figure(figsize=(12, 5)) + plt.subplot(121) + qplt.contourf(temperature, 15) + plt.gca().coastlines() + + # Plot #2: contourf with axes longitude from 0 to 360 + proj = ccrs.PlateCarree(central_longitude=-180.0) + plt.subplot(122, projection=proj) + qplt.contourf(temperature, 15) + plt.gca().coastlines() + iplt.show() + + +if __name__ == "__main__": + main() diff --git a/docs/gallery_code/general/plot_inset.py b/docs/gallery_code/general/plot_inset.py new file mode 100644 index 0000000000..5edd375743 --- /dev/null +++ b/docs/gallery_code/general/plot_inset.py @@ -0,0 +1,69 @@ +""" +Test Data Showing Inset Plots +============================= + +This example demonstrates the use of a single 3D data cube with time, latitude +and longitude dimensions to plot a temperature series for a single latitude +coordinate, with an inset plot of the data region. 
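+
+The inset is simply a second, smaller Axes placed on the figure in
+figure-fraction coordinates via :meth:`matplotlib.figure.Figure.add_axes`.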
+ +""" # noqa: D205, D212, D400 + +import cartopy.crs as ccrs +import matplotlib.pyplot as plt +import numpy as np + +import iris +import iris.plot as iplt +import iris.quickplot as qplt + + +def main(): + cube1 = iris.load_cube(iris.sample_data_path("ostia_monthly.nc")) + # Slice into cube to retrieve data for the inset map showing the + # data region + region = cube1[-1, :, :] + # Average over latitude to reduce cube to 1 dimension + plot_line = region.collapsed("latitude", iris.analysis.MEAN) + + # Open a window for plotting + fig = plt.figure() + # Add a single subplot (axes). Could also use "ax_main = plt.subplot()" + ax_main = fig.add_subplot(1, 1, 1) + # Produce a quick plot of the 1D cube + qplt.plot(plot_line) + + # Set x limits to match the data + ax_main.set_xlim(0, plot_line.coord("longitude").points.max()) + # Adjust the y limits so that the inset map won't clash with main plot + ax_main.set_ylim(294, 310) + ax_main.set_title("Meridional Mean Temperature") + # Add grid lines + ax_main.grid() + + # Add a second set of axes specifying the fractional coordinates within + # the figure with bottom left corner at x=0.55, y=0.58 with width + # 0.3 and height 0.25. + # Also specify the projection + ax_sub = fig.add_axes( + [0.55, 0.58, 0.3, 0.25], + projection=ccrs.Mollweide(central_longitude=180), + ) + + # Use iris.plot (iplt) here so colour bar properties can be specified + # Also use a sequential colour scheme to reduce confusion for those with + # colour-blindness + iplt.pcolormesh(region, cmap="Blues") + # Manually set the orientation and tick marks on your colour bar + ticklist = np.linspace(np.min(region.data), np.max(region.data), 4) + plt.colorbar(orientation="horizontal", ticks=ticklist) + ax_sub.set_title("Data Region") + # Add coastlines + ax_sub.coastlines() + # request to show entire map, using the colour mesh on the data region only + ax_sub.set_global() + + qplt.show() + + +if __name__ == "__main__": + main() diff --git a/docs/gallery_code/general/plot_lineplot_with_legend.py b/docs/gallery_code/general/plot_lineplot_with_legend.py new file mode 100644 index 0000000000..d1b3acd912 --- /dev/null +++ b/docs/gallery_code/general/plot_lineplot_with_legend.py @@ -0,0 +1,48 @@ +""" +Multi-Line Temperature Profile Plot +=================================== + +""" # noqa: D205, D212, D400 + +import matplotlib.pyplot as plt + +import iris +import iris.plot as iplt +import iris.quickplot as qplt + + +def main(): + fname = iris.sample_data_path("air_temp.pp") + + # Load exactly one cube from the given file. + temperature = iris.load_cube(fname) + + # We only want a small number of latitudes, so filter some out + # using "extract". + temperature = temperature.extract( + iris.Constraint(latitude=lambda cell: 68 <= cell < 78) + ) + + for cube in temperature.slices("longitude"): + # Create a string label to identify this cube (i.e. latitude: value). + cube_label = "latitude: %s" % cube.coord("latitude").points[0] + + # Plot the cube, and associate it with a label. + qplt.plot(cube, label=cube_label) + + # Add the legend with 2 columns. + plt.legend(ncol=2) + + # Put a grid on the plot. + plt.grid(True) + + # Tell matplotlib not to extend the plot axes range to nicely + # rounded numbers. + plt.axis("tight") + + # Finally, show it. 
+ iplt.show() + + +if __name__ == "__main__": + main() diff --git a/docs/gallery_code/general/plot_polar_stereo.py b/docs/gallery_code/general/plot_polar_stereo.py new file mode 100644 index 0000000000..99abbd0ae0 --- /dev/null +++ b/docs/gallery_code/general/plot_polar_stereo.py @@ -0,0 +1,28 @@ +""" +Example of a Polar Stereographic Plot +===================================== + +Demonstrates plotting data that are defined on a polar stereographic +projection. + +""" # noqa: D205, D212, D400 + +import matplotlib.pyplot as plt + +import iris +import iris.plot as iplt +import iris.quickplot as qplt + + +def main(): + file_path = iris.sample_data_path("toa_brightness_stereographic.nc") + cube = iris.load_cube(file_path) + qplt.contourf(cube) + ax = plt.gca() + ax.coastlines() + ax.gridlines() + iplt.show() + + +if __name__ == "__main__": + main() diff --git a/docs/gallery_code/general/plot_polynomial_fit.py b/docs/gallery_code/general/plot_polynomial_fit.py new file mode 100644 index 0000000000..37cc4e283b --- /dev/null +++ b/docs/gallery_code/general/plot_polynomial_fit.py @@ -0,0 +1,54 @@ +""" +Fitting a Polynomial +==================== + +This example demonstrates computing a polynomial fit to 1D data from an Iris +cube, adding the fit to the cube's metadata, and plotting both the 1D data and +the fit. + +""" # noqa: D205, D212, D400 + +import matplotlib.pyplot as plt +import numpy as np + +import iris +import iris.quickplot as qplt + + +def main(): + # Load some test data. + fname = iris.sample_data_path("A1B_north_america.nc") + cube = iris.load_cube(fname) + + # Extract a single time series at a latitude and longitude point. + location = next(cube.slices(["time"])) + + # Calculate a polynomial fit to the data at this time series. + x_points = location.coord("time").points + y_points = location.data + degree = 2 + + p = np.polyfit(x_points, y_points, degree) + y_fitted = np.polyval(p, x_points) + + # Add the polynomial fit values to the time series to take + # full advantage of Iris plotting functionality. + long_name = "degree_{}_polynomial_fit_of_{}".format(degree, cube.name()) + fit = iris.coords.AuxCoord(y_fitted, long_name=long_name, units=location.units) + location.add_aux_coord(fit, 0) + + qplt.plot(location.coord("time"), location, label="data") + qplt.plot( + location.coord("time"), + location.coord(long_name), + "g-", + label="polynomial fit", + ) + plt.legend(loc="best") + plt.title("Trend of US air temperature over time") + + qplt.show() + + +if __name__ == "__main__": + main() diff --git a/docs/gallery_code/general/plot_projections_and_annotations.py b/docs/gallery_code/general/plot_projections_and_annotations.py new file mode 100644 index 0000000000..6e8ba5a5af --- /dev/null +++ b/docs/gallery_code/general/plot_projections_and_annotations.py @@ -0,0 +1,140 @@ +""" +Plotting in Different Projections +================================= + +This example shows how to overlay data and graphics in different projections, +demonstrating various features of Iris, Cartopy and matplotlib. + +We wish to overlay two datasets, defined on different rotated-pole grids. +To display both together, we make a pseudocoloured plot of the first, overlaid +with contour lines from the second. +We also add some lines and text annotations drawn in various projections. + +We plot these over a specified region, in two different map projections. 
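+
+The same plotting code is reused for both projections; only the projection
+passed when the Axes is created changes.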
+ +""" # noqa: D205, D212, D400 + +import cartopy.crs as ccrs +import matplotlib.pyplot as plt +import numpy as np + +import iris +import iris.plot as iplt + +# Define a Cartopy 'ordinary' lat-lon coordinate reference system. +crs_latlon = ccrs.PlateCarree() + + +def make_plot(projection_name, projection_crs): + # Create a matplotlib Figure. + plt.figure() + + # Add a matplotlib Axes, specifying the required display projection. + # NOTE: specifying 'projection' (a "cartopy.crs.Projection") makes the + # resulting Axes a "cartopy.mpl.geoaxes.GeoAxes", which supports plotting + # in different coordinate systems. + ax = plt.axes(projection=projection_crs) + + # Set display limits to include a set region of latitude * longitude. + # (Note: Cartopy-specific). + ax.set_extent((-80.0, 20.0, 10.0, 80.0), crs=crs_latlon) + + # Add coastlines and meridians/parallels (Cartopy-specific). + ax.coastlines(linewidth=0.75, color="navy") + ax.gridlines(crs=crs_latlon, linestyle="-") + + # Plot the first dataset as a pseudocolour filled plot. + maindata_filepath = iris.sample_data_path("rotated_pole.nc") + main_data = iris.load_cube(maindata_filepath) + # NOTE: iplt.pcolormesh calls "pyplot.pcolormesh", passing in a coordinate + # system with the 'transform' keyword: This enables the Axes (a cartopy + # GeoAxes) to reproject the plot into the display projection. + iplt.pcolormesh(main_data, cmap="RdBu_r") + + # Overplot the other dataset (which has a different grid), as contours. + overlay_filepath = iris.sample_data_path("space_weather.nc") + overlay_data = iris.load_cube(overlay_filepath, "total electron content") + # NOTE: as above, "iris.plot.contour" calls "pyplot.contour" with a + # 'transform' keyword, enabling Cartopy reprojection. + iplt.contour(overlay_data, 20, linewidths=2.0, colors="darkgreen", linestyles="-") + + # Draw a high resolution margin line, inset from the pcolormesh border. + # First calculate rectangle corners, 7% in from each corner of the data. + x_coord, y_coord = main_data.coord(axis="x"), main_data.coord(axis="y") + x_start, x_end = np.min(x_coord.points), np.max(x_coord.points) + y_start, y_end = np.min(y_coord.points), np.max(y_coord.points) + margin = 0.07 + margin_fractions = np.array([margin, 1.0 - margin]) + x_lower, x_upper = x_start + (x_end - x_start) * margin_fractions + y_lower, y_upper = y_start + (y_end - y_start) * margin_fractions + steps = np.linspace(0, 1) + zeros, ones = np.zeros(steps.size), np.ones(steps.size) + x_delta, y_delta = (x_upper - x_lower), (y_upper - y_lower) + x_points = x_lower + x_delta * np.concatenate((steps, ones, steps[::-1], zeros)) + y_points = y_lower + y_delta * np.concatenate((zeros, steps, ones, steps[::-1])) + # Get the Iris coordinate system of the X coordinate (Y should be the same). + cs_data1 = x_coord.coord_system + # Construct an equivalent Cartopy coordinate reference system ("crs"). + crs_data1 = cs_data1.as_cartopy_crs() + # Draw the rectangle in this crs, with matplotlib "pyplot.plot". + # NOTE: the 'transform' keyword specifies a non-display coordinate system + # for the plot points (as used by the "iris.plot" functions). + plt.plot( + x_points, + y_points, + transform=crs_data1, + linewidth=2.0, + color="white", + linestyle="--", + ) + + # Mark some particular places with a small circle and a name label... + # Define some test points with latitude and longitude coordinates. 
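+    # (The particular places are arbitrary; any lat/lon points within the map
+    # extent set above would do.)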
+ city_data = [ + ("London", 51.5072, 0.1275), + ("Halifax, NS", 44.67, -63.61), + ("Reykjavik", 64.1333, -21.9333), + ] + # Place a single marker point and a text annotation at each place. + for name, lat, lon in city_data: + plt.plot( + lon, + lat, + marker="o", + markersize=7.0, + markeredgewidth=2.5, + markerfacecolor="black", + markeredgecolor="white", + transform=crs_latlon, + ) + # NOTE: the "plt.annotate call" does not have a "transform=" keyword, + # so for this one we transform the coordinates with a Cartopy call. + at_x, at_y = ax.projection.transform_point(lon, lat, src_crs=crs_latlon) + plt.annotate( + name, + xy=(at_x, at_y), + xytext=(30, 20), + textcoords="offset points", + color="black", + backgroundcolor="white", + size="large", + arrowprops=dict(arrowstyle="->", color="white", linewidth=2.5), + ) + + # Add a title, and display. + plt.title( + "A pseudocolour plot on the {} projection,\nwith overlaid contours.".format( + projection_name + ) + ) + iplt.show() + + +def main(): + # Demonstrate with two different display projections. + make_plot("Equidistant Cylindrical", ccrs.PlateCarree()) + make_plot("North Polar Stereographic", ccrs.NorthPolarStereo()) + + +if __name__ == "__main__": + main() diff --git a/docs/gallery_code/general/plot_rotated_pole_mapping.py b/docs/gallery_code/general/plot_rotated_pole_mapping.py new file mode 100644 index 0000000000..e9e3656184 --- /dev/null +++ b/docs/gallery_code/general/plot_rotated_pole_mapping.py @@ -0,0 +1,65 @@ +""" +Rotated Pole Mapping +==================== + +This example uses several visualisation methods to achieve an array of +differing images, including: + +* Visualisation of point based data +* Contouring of point based data +* Block plot of contiguous bounded data +* Non native projection and a Natural Earth shaded relief image underlay + +""" # noqa: D205, D212, D400 + +import cartopy.crs as ccrs +import matplotlib.pyplot as plt + +import iris +import iris.analysis.cartography +import iris.plot as iplt +import iris.quickplot as qplt + + +def main(): + # Load some test data. 
+ fname = iris.sample_data_path("rotated_pole.nc") + air_pressure = iris.load_cube(fname) + + # Plot #1: Point plot showing data values & a colorbar + plt.figure() + points = qplt.points(air_pressure, c=air_pressure.data) + cb = plt.colorbar(points, orientation="horizontal") + cb.set_label(air_pressure.units) + plt.gca().coastlines() + iplt.show() + + # Plot #2: Contourf of the point based data + plt.figure() + qplt.contourf(air_pressure, 15) + plt.gca().coastlines() + iplt.show() + + # Plot #3: Contourf overlaid by coloured point data + plt.figure() + qplt.contourf(air_pressure) + iplt.points(air_pressure, c=air_pressure.data) + plt.gca().coastlines() + iplt.show() + + # For the purposes of this example, add some bounds to the latitude + # and longitude + air_pressure.coord("grid_latitude").guess_bounds() + air_pressure.coord("grid_longitude").guess_bounds() + + # Plot #4: Block plot + plt.figure() + plt.axes(projection=ccrs.PlateCarree()) + iplt.pcolormesh(air_pressure) + plt.gca().stock_img() + plt.gca().coastlines() + iplt.show() + + +if __name__ == "__main__": + main() diff --git a/docs/gallery_code/general/plot_zonal_means.py b/docs/gallery_code/general/plot_zonal_means.py new file mode 100644 index 0000000000..d4ec1eb1fc --- /dev/null +++ b/docs/gallery_code/general/plot_zonal_means.py @@ -0,0 +1,91 @@ +""" +Zonal Mean Diagram of Air Temperature +===================================== + +This example demonstrates aligning a linear plot and a cartographic plot +using Matplotlib. + +""" # noqa: D205, D212, D400 + +import cartopy.crs as ccrs +import matplotlib.pyplot as plt +from mpl_toolkits.axes_grid1 import make_axes_locatable +import numpy as np + +import iris +from iris.analysis import MEAN +import iris.plot as iplt +import iris.quickplot as qplt + + +def main(): + # Loads air_temp.pp and "collapses" longitude into a single, average value. + fname = iris.sample_data_path("air_temp.pp") + temperature = iris.load_cube(fname) + collapsed_temp = temperature.collapsed("longitude", MEAN) + + # Set y-axes with -90 and 90 limits and steps of 15 per tick. + start, stop, step = -90, 90, 15 + yticks = np.arange(start, stop + step, step) + ylim = [start, stop] + + # Plot "temperature" on a cartographic plot and set the ticks and titles + # on the axes. + fig = plt.figure(figsize=[12, 4]) + + ax1 = fig.add_subplot(111, projection=ccrs.PlateCarree()) + im = iplt.contourf(temperature, cmap="RdYlBu_r") + ax1.coastlines() + ax1.gridlines() + ax1.set_xticks([-180, -90, 0, 90, 180]) + ax1.set_yticks(yticks) + ax1.set_title("Air Temperature") + ax1.set_ylabel(f"Latitude / {temperature.coord('latitude').units}") + ax1.set_xlabel(f"Longitude / {temperature.coord('longitude').units}") + ax1.set_ylim(*ylim) + + # Create a Matplotlib AxesDivider object to allow alignment of other + # Axes objects. + divider = make_axes_locatable(ax1) + + # Gives the air temperature bar size, colour and a title. + ax2 = divider.new_vertical( + size="5%", pad=0.5, axes_class=plt.Axes, pack_start=True + ) # creates 2nd axis + fig.add_axes(ax2) + cbar = plt.colorbar( + im, cax=ax2, orientation="horizontal" + ) # puts colour bar on second axis + cbar.ax.set_xlabel(f"{temperature.units}") # labels colour bar + + # Plot "collapsed_temp" on the mean graph and set the ticks and titles + # on the axes. 
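+    # (new_horizontal() creates an Axes whose height is tied to ax1, so the
+    # zonal-mean line lines up with the map's latitude scale.)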
+ ax3 = divider.new_horizontal( + size="30%", pad=0.4, axes_class=plt.Axes + ) # create 3rd axis + fig.add_axes(ax3) + qplt.plot( + collapsed_temp, collapsed_temp.coord("latitude") + ) # plots temperature collapsed over longitude against latitude + ax3.axhline(0, color="k", linewidth=0.5) + + # Creates zonal mean details + ax3.set_title("Zonal Mean") + ax3.yaxis.set_label_position("right") + ax3.yaxis.tick_right() + ax3.set_yticks(yticks) + ax3.grid() + + # Round each tick for the third ax to the nearest 20 (ready for use). + data_max = collapsed_temp.data.max() + x_max = data_max - data_max % -20 + data_min = collapsed_temp.data.min() + x_min = data_min - data_min % 20 + ax3.set_xlim(x_min, x_max) + ax3.set_ylim(*ylim) + + plt.show() + + +if __name__ == "__main__": + main() diff --git a/docs/gallery_code/meteorology/README.rst b/docs/gallery_code/meteorology/README.rst new file mode 100644 index 0000000000..e8e902b498 --- /dev/null +++ b/docs/gallery_code/meteorology/README.rst @@ -0,0 +1,3 @@ +Meteorology +----------- + diff --git a/docs/gallery_code/meteorology/plot_COP_1d.py b/docs/gallery_code/meteorology/plot_COP_1d.py new file mode 100644 index 0000000000..84addd140a --- /dev/null +++ b/docs/gallery_code/meteorology/plot_COP_1d.py @@ -0,0 +1,112 @@ +""" +Global Average Annual Temperature Plot +====================================== + +Produces a time-series plot of North American temperature forecasts for 2 +different emission scenarios. Constraining data to a limited spatial area also +features in this example. + +The data used comes from the HadGEM2-AO model simulations for the A1B and E1 +scenarios, both of which were derived using the IMAGE Integrated Assessment +Model (Johns et al. 2011; Lowe et al. 2009). + +References +---------- + Johns T.C., et al. (2011) Climate change under aggressive mitigation: the + ENSEMBLES multi-model experiment. Climate Dynamics, Vol 37, No. 9-10, + doi:10.1007/s00382-011-1005-5. + + Lowe J.A., C.D. Hewitt, D.P. Van Vuuren, T.C. Johns, E. Stehfest, J-F. + Royer, and P. van der Linden, 2009. New Study For Climate Modeling, + Analyses, and Scenarios. Eos Trans. AGU, Vol 90, No. 21, + doi:10.1029/2009EO210001. + +.. seealso:: + + Further details on the aggregation functionality being used in this example + can be found in :ref:`cube-statistics`. + +""" # noqa: D205, D212, D400 + +import matplotlib.pyplot as plt +import numpy as np + +import iris +import iris.analysis.cartography +import iris.plot as iplt +import iris.quickplot as qplt + + +def main(): + # Load data into three Cubes, one for each set of NetCDF files. + e1 = iris.load_cube(iris.sample_data_path("E1_north_america.nc")) + + a1b = iris.load_cube(iris.sample_data_path("A1B_north_america.nc")) + + # load in the global pre-industrial mean temperature, and limit the domain + # to the same North American region that e1 and a1b are at. + north_america = iris.Constraint( + longitude=lambda v: 225 <= v <= 315, latitude=lambda v: 15 <= v <= 60 + ) + pre_industrial = iris.load_cube( + iris.sample_data_path("pre-industrial.pp"), north_america + ) + + # Generate area-weights array. As e1 and a1b are on the same grid we can + # do this just once and reuse. This method requires bounds on lat/lon + # coords, so let's add some in sensible locations using the "guess_bounds" + # method. 
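+    # (Without bounds on these coordinates, area_weights has no grid-box
+    # extents to integrate over and will raise an error.)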
+ e1.coord("latitude").guess_bounds() + e1.coord("longitude").guess_bounds() + e1_grid_areas = iris.analysis.cartography.area_weights(e1) + pre_industrial.coord("latitude").guess_bounds() + pre_industrial.coord("longitude").guess_bounds() + pre_grid_areas = iris.analysis.cartography.area_weights(pre_industrial) + + # Perform the area-weighted mean for each of the datasets using the + # computed grid-box areas. + pre_industrial_mean = pre_industrial.collapsed( + ["latitude", "longitude"], iris.analysis.MEAN, weights=pre_grid_areas + ) + e1_mean = e1.collapsed( + ["latitude", "longitude"], iris.analysis.MEAN, weights=e1_grid_areas + ) + a1b_mean = a1b.collapsed( + ["latitude", "longitude"], iris.analysis.MEAN, weights=e1_grid_areas + ) + + # Plot the datasets + qplt.plot(e1_mean, label="E1 scenario", lw=1.5, color="blue") + qplt.plot(a1b_mean, label="A1B-Image scenario", lw=1.5, color="red") + + # Draw a horizontal line showing the pre-industrial mean + plt.axhline( + y=pre_industrial_mean.data, + color="gray", + linestyle="dashed", + label="pre-industrial", + lw=1.5, + ) + + # Constrain the period 1860-1999 and extract the observed data from a1b + constraint = iris.Constraint(time=lambda cell: 1860 <= cell.point.year <= 1999) + observed = a1b_mean.extract(constraint) + + # Assert that this data set is the same as the e1 scenario: + # they share data up to the 1999 cut off. + assert np.all(np.isclose(observed.data, e1_mean.extract(constraint).data)) + + # Plot the observed data + qplt.plot(observed, label="observed", color="black", lw=1.5) + + # Add a legend and title + plt.legend(loc="upper left") + plt.title("North American mean air temperature", fontsize=18) + + plt.xlabel("Time / year") + plt.grid() + iplt.show() + + +if __name__ == "__main__": + main() diff --git a/docs/gallery_code/meteorology/plot_COP_maps.py b/docs/gallery_code/meteorology/plot_COP_maps.py new file mode 100644 index 0000000000..1c5e865a8f --- /dev/null +++ b/docs/gallery_code/meteorology/plot_COP_maps.py @@ -0,0 +1,193 @@ +""" +Global Average Annual Temperature Maps +====================================== + +Produces maps of global temperature forecasts from the A1B and E1 scenarios. + +The data used comes from the HadGEM2-AO model simulations for the A1B and E1 +scenarios, both of which were derived using the IMAGE Integrated Assessment +Model (Johns et al. 2011; Lowe et al. 2009). + +References +---------- + Johns T.C., et al. (2011) Climate change under aggressive mitigation: the + ENSEMBLES multi-model experiment. Climate Dynamics, Vol 37, No. 9-10, + doi:10.1007/s00382-011-1005-5. + + Lowe J.A., C.D. Hewitt, D.P. Van Vuuren, T.C. Johns, E. Stehfest, J-F. + Royer, and P. van der Linden, 2009. New Study For Climate Modeling, + Analyses, and Scenarios. Eos Trans. AGU, Vol 90, No. 21, + doi:10.1029/2009EO210001. + +""" # noqa: D205, D212, D400 + +import os.path + +import matplotlib.pyplot as plt +import numpy as np + +import iris +import iris.coords as coords +import iris.plot as iplt + + +def cop_metadata_callback(cube, field, filename): + """Add an "Experiment" coordinate which comes from the filename.""" + # Extract the experiment name (such as A1B or E1) from the filename (in + # this case it is just the start of the file name, before the first "."). + fname = os.path.basename(filename) # filename without path. + experiment_label = fname.split(".")[0] + + # Create a coordinate with the experiment label in it... 
+    exp_coord = coords.AuxCoord(
+        experiment_label, long_name="Experiment", units="no_unit"
+    )
+
+    # ...and add it to the cube.
+    cube.add_aux_coord(exp_coord)
+
+
+def main():
+    # Load E1 and A1B scenarios using the callback to update the metadata.
+    scenario_files = [
+        iris.sample_data_path(fname) for fname in ["E1.2098.pp", "A1B.2098.pp"]
+    ]
+    scenarios = iris.load(scenario_files, callback=cop_metadata_callback)
+
+    # Load the preindustrial reference data.
+    preindustrial = iris.load_cube(iris.sample_data_path("pre-industrial.pp"))
+
+    # Define evenly spaced contour levels: -2.5, -1.5, ... 15.5, 16.5 with the
+    # specific colours.
+    levels = np.arange(20) - 2.5
+    red = (
+        np.array(
+            [
+                0,
+                0,
+                221,
+                239,
+                229,
+                217,
+                239,
+                234,
+                228,
+                222,
+                205,
+                196,
+                161,
+                137,
+                116,
+                89,
+                77,
+                60,
+                51,
+            ]
+        )
+        / 256.0
+    )
+    green = (
+        np.array(
+            [
+                16,
+                217,
+                242,
+                243,
+                235,
+                225,
+                190,
+                160,
+                128,
+                87,
+                72,
+                59,
+                33,
+                21,
+                29,
+                30,
+                30,
+                29,
+                26,
+            ]
+        )
+        / 256.0
+    )
+    blue = (
+        np.array(
+            [
+                255,
+                255,
+                243,
+                169,
+                99,
+                51,
+                63,
+                37,
+                39,
+                21,
+                27,
+                23,
+                22,
+                26,
+                29,
+                28,
+                27,
+                25,
+                22,
+            ]
+        )
+        / 256.0
+    )
+
+    # Put those colours into an array which can be passed to contourf as the
+    # specific colours for each level.
+    colors = np.stack([red, green, blue], axis=1)
+
+    # Make a wider than normal figure to house two maps side-by-side.
+    fig, ax_array = plt.subplots(1, 2, figsize=(12, 5))
+
+    # Loop over our scenarios to make a plot for each.
+    for ax, experiment, label in zip(ax_array, ["E1", "A1B"], ["E1", "A1B-Image"]):
+        exp_cube = scenarios.extract_cube(iris.Constraint(Experiment=experiment))
+        time_coord = exp_cube.coord("time")
+
+        # Calculate the difference from the preindustrial control run.
+        exp_anom_cube = exp_cube - preindustrial
+
+        # Plot this anomaly.
+        plt.sca(ax)
+        ax.set_title(f"HadGEM2 {label} Scenario", fontsize=10)
+        contour_result = iplt.contourf(
+            exp_anom_cube, levels, colors=colors, extend="both"
+        )
+        plt.gca().coastlines()
+
+    # Now add a colour bar which spans the two plots. Here we pass Figure.axes
+    # which is a list of all (two) axes currently on the figure. Note that
+    # these are different to the contents of ax_array, because those were
+    # standard Matplotlib Axes that Iris automatically replaced with Cartopy
+    # GeoAxes.
+    cbar = plt.colorbar(
+        contour_result, ax=fig.axes, aspect=60, orientation="horizontal"
+    )
+
+    # Label the colour bar and add ticks.
+    cbar.set_label(preindustrial.units)
+    cbar.ax.tick_params(length=0)
+
+    # Get the time datetime from the coordinate.
+    time = time_coord.units.num2date(time_coord.points[0])
+    # Set a title for the entire figure, using the year from the datetime
+    # object. Also, set the y value for the title so that it is not tight to
+    # the top of the plot.
+    fig.suptitle(
+        f"Annual Temperature Predictions for {time.year}",
+        y=0.9,
+        fontsize=18,
+    )
+
+    iplt.show()
+
+
+if __name__ == "__main__":
+    main()
diff --git a/docs/gallery_code/meteorology/plot_TEC.py b/docs/gallery_code/meteorology/plot_TEC.py
new file mode 100644
index 0000000000..e6269eaf9b
--- /dev/null
+++ b/docs/gallery_code/meteorology/plot_TEC.py
@@ -0,0 +1,42 @@
+"""
+Ionosphere Space Weather
+========================
+
+This space weather example plots a filled contour of rotated pole point
+data with a shaded relief image underlay. The plot shows aggregated
+vertical electron content in the ionosphere.
+
+The plot exhibits an interesting outline effect due to excluding data
+values below a certain threshold.
+
+"""  # noqa: D205, D212, D400
+
+import matplotlib.pyplot as plt
+import numpy.ma as ma
+
+import iris
+import iris.plot as iplt
+import iris.quickplot as qplt
+
+
+def main():
+    # Load the "total electron content" cube.
+    filename = iris.sample_data_path("space_weather.nc")
+    cube = iris.load_cube(filename, "total electron content")
+
+    # Explicitly mask negative electron content.
+    cube.data = ma.masked_less(cube.data, 0)
+
+    # Plot the cube using one hundred colour levels.
+    qplt.contourf(cube, 100)
+    plt.title("Total Electron Content")
+    plt.xlabel("longitude / degrees")
+    plt.ylabel("latitude / degrees")
+    plt.gca().stock_img()
+    plt.gca().coastlines()
+
+    iplt.show()
+
+
+if __name__ == "__main__":
+    main()
diff --git a/docs/gallery_code/meteorology/plot_deriving_phenomena.py b/docs/gallery_code/meteorology/plot_deriving_phenomena.py
new file mode 100644
index 0000000000..81a05be9b9
--- /dev/null
+++ b/docs/gallery_code/meteorology/plot_deriving_phenomena.py
@@ -0,0 +1,92 @@
+"""
+Deriving Exner Pressure and Air Temperature
+===========================================
+
+This example shows some processing of cubes in order to derive further related
+cubes; in this case the derived cubes are Exner pressure and air temperature,
+which are calculated by combining air pressure and air potential temperature.
+Finally, the two new cubes are presented side-by-side in a plot.
+
+"""  # noqa: D205, D212, D400
+
+import matplotlib.pyplot as plt
+import matplotlib.ticker
+
+import iris
+import iris.coords as coords
+import iris.iterate
+import iris.plot as iplt
+import iris.quickplot as qplt
+
+
+def limit_colorbar_ticks(contour_object):
+    """Limit the number of colorbar ticks.
+
+    Takes a contour object which has an associated colorbar and limits the
+    number of ticks on that colorbar to 4.
+
+    """
+    colorbar = contour_object.colorbar
+    colorbar.locator = matplotlib.ticker.MaxNLocator(4)
+    colorbar.update_ticks()
+
+
+def main():
+    fname = iris.sample_data_path("colpex.pp")
+
+    # The list of phenomena of interest
+    phenomena = ["air_potential_temperature", "air_pressure"]
+
+    # Define the constraint on standard name and model level
+    constraints = [
+        iris.Constraint(phenom, model_level_number=1) for phenom in phenomena
+    ]
+
+    air_potential_temperature, air_pressure = iris.load_cubes(fname, constraints)
+
+    # Define a coordinate which represents 1000 hPa
+    p0 = coords.AuxCoord(1000, long_name="P0", units="hPa")
+    # Convert reference pressure 'p0' into the same units as 'air_pressure'
+    p0.convert_units(air_pressure.units)
+
+    # Calculate Exner pressure
+    exner_pressure = (air_pressure / p0) ** (287.05 / 1005.0)
+    # Set the name (the unit is scalar)
+    exner_pressure.rename("exner_pressure")
+
+    # Calculate air_temp
+    air_temperature = exner_pressure * air_potential_temperature
+    # Set the name (the unit is K)
+    air_temperature.rename("air_temperature")
+
+    # Now create an iterator which will give us lat lon slices of
+    # exner pressure and air temperature in the form
+    # (exner_slice, air_temp_slice).
+    lat_lon_slice_pairs = iris.iterate.izip(
+        exner_pressure,
+        air_temperature,
+        coords=["grid_latitude", "grid_longitude"],
+    )
+
+    # For the purposes of this example, we only want to demonstrate the first
+    # plot.
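+    # (Wrapping next() in a list keeps the for-loop below unchanged; it simply
+    # runs over a single slice pair.)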
+
+    # For the purposes of this example, we only want to demonstrate the first
+    # plot.
+    lat_lon_slice_pairs = [next(lat_lon_slice_pairs)]
+
+    plt.figure(figsize=(8, 4))
+    for exner_slice, air_temp_slice in lat_lon_slice_pairs:
+        plt.subplot(121)
+        cont = qplt.contourf(exner_slice)
+
+        # The default colorbar has a few too many ticks on it, causing text to
+        # overlap. Therefore, limit the number of ticks.
+        limit_colorbar_ticks(cont)
+
+        plt.subplot(122)
+        cont = qplt.contourf(air_temp_slice)
+        limit_colorbar_ticks(cont)
+    iplt.show()
+
+
+if __name__ == "__main__":
+    main()
diff --git a/docs/gallery_code/meteorology/plot_hovmoller.py b/docs/gallery_code/meteorology/plot_hovmoller.py
new file mode 100644
index 0000000000..829b370d78
--- /dev/null
+++ b/docs/gallery_code/meteorology/plot_hovmoller.py
@@ -0,0 +1,51 @@
+"""
+Hovmoller Diagram of Monthly Surface Temperature
+================================================
+
+This example demonstrates the creation of a Hovmoller diagram with fine
+control over plot ticks and labels. The data comes from the Met Office OSTIA
+project and has been pre-processed to calculate the monthly mean sea surface
+temperature.
+
+"""  # noqa: D205, D212, D400
+
+import matplotlib.dates as mdates
+import matplotlib.pyplot as plt
+
+import iris
+import iris.plot as iplt
+import iris.quickplot as qplt
+
+
+def main():
+    # Load a single cube of surface temperature between +/- 5 degrees latitude.
+    fname = iris.sample_data_path("ostia_monthly.nc")
+    cube = iris.load_cube(
+        fname,
+        iris.Constraint("surface_temperature", latitude=lambda v: -5 < v < 5),
+    )
+
+    # Take the mean over latitude.
+    cube = cube.collapsed("latitude", iris.analysis.MEAN)
+
+    # Now that the data is in a suitable form, let's create the contour plot,
+    # with 20 levels.
+    qplt.contourf(cube, 20)
+
+    # Put a custom label on the y axis.
+    plt.ylabel("Time / years")
+
+    # Stop matplotlib providing clever axes range padding.
+    plt.axis("tight")
+
+    # As we are plotting annual variability, put years as the y ticks.
+    plt.gca().yaxis.set_major_locator(mdates.YearLocator())
+
+    # And format the ticks to just show the year.
+    plt.gca().yaxis.set_major_formatter(mdates.DateFormatter("%Y"))
+
+    iplt.show()
+
+
+if __name__ == "__main__":
+    main()
diff --git a/docs/gallery_code/meteorology/plot_lagged_ensemble.py b/docs/gallery_code/meteorology/plot_lagged_ensemble.py
new file mode 100644
index 0000000000..7c34572136
--- /dev/null
+++ b/docs/gallery_code/meteorology/plot_lagged_ensemble.py
@@ -0,0 +1,155 @@
+"""
+Seasonal Ensemble Model Plots
+=============================
+
+This example demonstrates the loading of a lagged ensemble dataset from the
+GloSea4 model, which is then used to produce two types of plot:
+
+* The first shows the "postage stamp" style image with an array of 14 images,
+  one for each ensemble member with a shared colorbar. (The missing image in
+  this example represents ensemble member number 6, which was a failed run.)
+
+* The second plot shows the data limited to a region of interest, in this case
+  a region defined for forecasting ENSO (El Nino-Southern Oscillation), which,
+  for the purposes of this example, has had the ensemble mean subtracted from
+  each ensemble member to give an anomaly surface temperature. In practice a
+  better approach would be to take the climatological mean, calibrated to the
+  model, from each ensemble member.
+ +""" # noqa: D205, D212, D400 + +import matplotlib.pyplot as plt +import matplotlib.ticker +import numpy as np + +import iris +import iris.plot as iplt + + +def realization_metadata(cube, field, fname): + """Modify the cube's metadata to add a "realization" coordinate. + + A function which modifies the cube's metadata to add a "realization" + (ensemble member) coordinate from the filename if one doesn't already exist + in the cube. + + """ + # Add an ensemble member coordinate if one doesn't already exist. + if not cube.coords("realization"): + # The ensemble member is encoded in the filename as *_???.pp where ??? + # is the ensemble member. + realization_number = fname[-6:-3] + realization_coord = iris.coords.AuxCoord( + np.int32(realization_number), "realization", units="1" + ) + cube.add_aux_coord(realization_coord) + + +def main(): + # Create a constraint to extract surface temperature cubes which have a + # "realization" coordinate. + constraint = iris.Constraint("surface_temperature", realization=lambda value: True) + # Use this to load our ensemble. The callback ensures all our members + # have the "realization" coordinate and therefore they will all be loaded. + surface_temp = iris.load_cube( + iris.sample_data_path("GloSea4", "ensemble_???.pp"), + constraint, + callback=realization_metadata, + ) + + # ------------------------------------------------------------------------- + # Plot #1: Ensemble postage stamps + # ------------------------------------------------------------------------- + + # For the purposes of this example, take the last time element of the cube. + # First get hold of the last time by slicing the coordinate. + last_time_coord = surface_temp.coord("time")[-1] + last_timestep = surface_temp.subset(last_time_coord) + + # Find the maximum and minimum across the dataset. + data_min = np.min(last_timestep.data) + data_max = np.max(last_timestep.data) + + # Create a wider than normal figure to support our many plots. + plt.figure(figsize=(12, 6), dpi=100) + + # Also manually adjust the spacings which are used when creating subplots. + plt.gcf().subplots_adjust( + hspace=0.05, + wspace=0.05, + top=0.95, + bottom=0.05, + left=0.075, + right=0.925, + ) + + # Iterate over all possible latitude longitude slices. + for cube in last_timestep.slices(["latitude", "longitude"]): + # Get the ensemble member number from the ensemble coordinate. + ens_member = cube.coord("realization").points[0] + + # Plot the data in a 4x4 grid, with each plot's position in the grid + # being determined by ensemble member number. The special case for the + # 13th ensemble member is to have the plot at the bottom right. + if ens_member == 13: + plt.subplot(4, 4, 16) + else: + plt.subplot(4, 4, ens_member + 1) + + # Plot with 50 evenly spaced contour levels (49 intervals). + cf = iplt.contourf(cube, 49, vmin=data_min, vmax=data_max) + + # Add coastlines. + plt.gca().coastlines() + + # Make an axes to put the shared colorbar in. + colorbar_axes = plt.gcf().add_axes([0.35, 0.1, 0.3, 0.05]) + colorbar = plt.colorbar(cf, colorbar_axes, orientation="horizontal") + colorbar.set_label(last_timestep.units) + + # Limit the colorbar to 8 tick marks. + colorbar.locator = matplotlib.ticker.MaxNLocator(8) + colorbar.update_ticks() + + # Get the time for the entire plot. + time = last_time_coord.units.num2date(last_time_coord.bounds[0, 0]) + + # Set a global title for the postage stamps with the date formatted by + # "monthname year". 
+ time_string = time.strftime("%B %Y") + plt.suptitle(f"Surface temperature ensemble forecasts for {time_string}") + + iplt.show() + + # ------------------------------------------------------------------------- + # Plot #2: ENSO plumes + # ------------------------------------------------------------------------- + + # Nino 3.4 lies between: 170W and 120W, 5N and 5S, so use the intersection + # method to restrict to this region. + nino_cube = surface_temp.intersection(latitude=[-5, 5], longitude=[-170, -120]) + + # Calculate the horizontal mean for the nino region. + mean = nino_cube.collapsed(["latitude", "longitude"], iris.analysis.MEAN) + + # Calculate the ensemble mean of the horizontal mean. + ensemble_mean = mean.collapsed("realization", iris.analysis.MEAN) + + # Take the ensemble mean from each ensemble member. + mean -= ensemble_mean + + plt.figure() + + for ensemble_member in mean.slices(["time"]): + # Draw each ensemble member as a dashed line in black. + iplt.plot(ensemble_member, "--k") + + plt.title("Mean temperature anomaly for ENSO 3.4 region") + plt.xlabel("Time") + plt.ylabel("Temperature anomaly / K") + + iplt.show() + + +if __name__ == "__main__": + main() diff --git a/docs/gallery_code/meteorology/plot_wind_barbs.py b/docs/gallery_code/meteorology/plot_wind_barbs.py new file mode 100644 index 0000000000..f11c9a7b50 --- /dev/null +++ b/docs/gallery_code/meteorology/plot_wind_barbs.py @@ -0,0 +1,61 @@ +""" +Plotting Wind Direction Using Barbs +=================================== + +This example demonstrates using barbs to plot wind speed contours and wind +direction barbs from wind vector component input data. The vector components +are co-located in space in this case. + +The magnitude of the wind in the original data is low and so doesn't illustrate +the full range of barbs. The wind is scaled to simulate a storm that better +illustrates the range of barbs that are available. 
+ +""" # noqa: D205, D212, D400 + +import matplotlib.pyplot as plt + +import iris +import iris.plot as iplt +import iris.quickplot as qplt + + +def main(): + # Load the u and v components of wind from a pp file + infile = iris.sample_data_path("wind_speed_lake_victoria.pp") + + uwind = iris.load_cube(infile, "x_wind") + vwind = iris.load_cube(infile, "y_wind") + + uwind.convert_units("knot") + vwind.convert_units("knot") + + # To illustrate the full range of barbs, scale the wind speed up to pretend + # that a storm is passing over + magnitude = (uwind**2 + vwind**2) ** 0.5 + magnitude.convert_units("knot") + max_speed = magnitude.collapsed(("latitude", "longitude"), iris.analysis.MAX).data + max_desired = 65 + + uwind = uwind / max_speed * max_desired + vwind = vwind / max_speed * max_desired + + # Create a cube containing the wind speed + windspeed = (uwind**2 + vwind**2) ** 0.5 + windspeed.rename("windspeed") + windspeed.convert_units("knot") + + plt.figure() + + # Plot the wind speed as a contour plot + qplt.contourf(windspeed) + + # Add wind barbs except for the outermost values which overhang the edge + # of the plot if left + iplt.barbs(uwind[1:-1, 1:-1], vwind[1:-1, 1:-1], pivot="middle", length=6) + + plt.title("Wind speed during a simulated storm") + qplt.show() + + +if __name__ == "__main__": + main() diff --git a/docs/gallery_code/meteorology/plot_wind_speed.py b/docs/gallery_code/meteorology/plot_wind_speed.py new file mode 100644 index 0000000000..5310ad937d --- /dev/null +++ b/docs/gallery_code/meteorology/plot_wind_speed.py @@ -0,0 +1,62 @@ +""" +Plotting Wind Direction Using Quiver +==================================== + +This example demonstrates using quiver to plot wind speed contours and wind +direction arrows from wind vector component input data. The vector components +are co-located in space in this case. + +For the second plot, the data used for the arrows is normalised to produce +arrows with a uniform size on the plot. + +""" # noqa: D205, D212, D400 + +import cartopy.feature as cfeat +import matplotlib.pyplot as plt + +import iris +import iris.plot as iplt +import iris.quickplot as qplt + + +def main(): + # Load the u and v components of wind from a pp file. + infile = iris.sample_data_path("wind_speed_lake_victoria.pp") + + uwind = iris.load_cube(infile, "x_wind") + vwind = iris.load_cube(infile, "y_wind") + + # Create a cube containing the wind speed. + windspeed = (uwind**2 + vwind**2) ** 0.5 + windspeed.rename("windspeed") + + # Plot the wind speed as a contour plot. + qplt.contourf(windspeed, 20) + + # Show the lake on the current axes. + lakes = cfeat.NaturalEarthFeature("physical", "lakes", "50m", facecolor="none") + plt.gca().add_feature(lakes) + + # Add arrows to show the wind vectors. + iplt.quiver(uwind, vwind, pivot="middle") + + plt.title("Wind speed over Lake Victoria") + qplt.show() + + # Normalise the data for uniform arrow size. + u_norm = uwind / windspeed + v_norm = vwind / windspeed + + # Make a new figure for the normalised plot. 
+ plt.figure() + + qplt.contourf(windspeed, 20) + plt.gca().add_feature(lakes) + iplt.quiver(u_norm, v_norm, pivot="middle") + + plt.title("Wind speed over Lake Victoria") + qplt.show() + + +if __name__ == "__main__": + main() diff --git a/docs/gallery_code/oceanography/README.rst b/docs/gallery_code/oceanography/README.rst new file mode 100644 index 0000000000..0f3adf906b --- /dev/null +++ b/docs/gallery_code/oceanography/README.rst @@ -0,0 +1,3 @@ +Oceanography +------------ + diff --git a/docs/gallery_code/oceanography/plot_atlantic_profiles.py b/docs/gallery_code/oceanography/plot_atlantic_profiles.py new file mode 100644 index 0000000000..a43fb7f8cb --- /dev/null +++ b/docs/gallery_code/oceanography/plot_atlantic_profiles.py @@ -0,0 +1,92 @@ +""" +Oceanographic Profiles and T-S Diagrams +======================================= + +This example demonstrates how to plot vertical profiles of different +variables in the same axes, and how to make a scatter plot of two +variables. There is an oceanographic theme but the same techniques are +equally applicable to atmospheric or other kinds of data. + +The data used are profiles of potential temperature and salinity in the +Equatorial and South Atlantic, output from an ocean model. + +The y-axis of the first plot produced will be automatically inverted due to the +presence of the attribute positive=down on the depth coordinate. This means +depth values intuitively increase downward on the y-axis. + +""" # noqa: D205, D212, D400 + +import matplotlib.pyplot as plt + +import iris +import iris.iterate +import iris.plot as iplt + + +def main(): + # Load the gridded temperature and salinity data. + fname = iris.sample_data_path("atlantic_profiles.nc") + cubes = iris.load(fname) + (theta,) = cubes.extract("sea_water_potential_temperature") + (salinity,) = cubes.extract("sea_water_practical_salinity") + + # Extract profiles of temperature and salinity from a particular point in + # the southern portion of the domain, and limit the depth of the profile + # to 1000m. + lon_cons = iris.Constraint(longitude=330.5) + lat_cons = iris.Constraint(latitude=lambda lat: -10 < lat < -9) + depth_cons = iris.Constraint(depth=lambda d: d <= 1000) + theta_1000m = theta.extract(depth_cons & lon_cons & lat_cons) + salinity_1000m = salinity.extract(depth_cons & lon_cons & lat_cons) + + # Plot these profiles on the same set of axes. Depth is automatically + # recognised as a vertical coordinate and placed on the y-axis. + # The first plot is in the default axes. We'll use the same color for the + # curve and its axes/tick labels. + plt.figure(figsize=(5, 6)) + temperature_color = (0.3, 0.4, 0.5) + ax1 = plt.gca() + iplt.plot( + theta_1000m, + linewidth=2, + color=temperature_color, + alpha=0.75, + ) + ax1.set_xlabel("Potential Temperature / K", color=temperature_color) + ax1.set_ylabel("Depth / m") + for ticklabel in ax1.get_xticklabels(): + ticklabel.set_color(temperature_color) + + # To plot salinity in the same axes we use twiny(). We'll use a different + # color to identify salinity. + salinity_color = (0.6, 0.1, 0.15) + ax2 = plt.gca().twiny() + iplt.plot( + salinity_1000m, + linewidth=2, + color=salinity_color, + alpha=0.75, + ) + ax2.set_xlabel("Salinity / PSU", color=salinity_color) + for ticklabel in ax2.get_xticklabels(): + ticklabel.set_color(salinity_color) + plt.tight_layout() + iplt.show() + + # Now plot a T-S diagram using scatter. We'll use all the profiles here, + # and each point will be coloured according to its depth. 
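The colouring technique described in that comment is plain matplotlib: passing an array to scatter's `c` keyword maps a third variable onto the colormap. A minimal standalone sketch with invented values (depth standing in for the cube's depth points):

```python
import matplotlib.pyplot as plt
import numpy as np

salinity = np.linspace(34.5, 36.5, 20)      # illustrative values
temperature = np.linspace(278.0, 300.0, 20)
depth = np.linspace(0.0, 1000.0, 20)

# Each point's colour is taken from its depth value via the colormap.
sc = plt.scatter(salinity, temperature, c=depth, marker="+", cmap="RdYlBu_r")
plt.colorbar(sc, orientation="horizontal", label="Depth / m")
plt.show()
```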
+ plt.figure(figsize=(6, 6)) + depth_values = theta.coord("depth").points + for s, t in iris.iterate.izip(salinity, theta, coords="depth"): + iplt.scatter(s, t, c=depth_values, marker="+", cmap="RdYlBu_r") + ax = plt.gca() + ax.set_xlabel("Salinity / PSU") + ax.set_ylabel("Potential Temperature / K") + cb = plt.colorbar(orientation="horizontal") + cb.set_label("Depth / m") + plt.tight_layout() + iplt.show() + + +if __name__ == "__main__": + main() diff --git a/docs/gallery_code/oceanography/plot_load_nemo.py b/docs/gallery_code/oceanography/plot_load_nemo.py new file mode 100644 index 0000000000..aac89fec0e --- /dev/null +++ b/docs/gallery_code/oceanography/plot_load_nemo.py @@ -0,0 +1,61 @@ +""" +Load a Time Series of Data From the NEMO Model +============================================== + +This example demonstrates how to load multiple files containing data output by +the NEMO model and combine them into a time series in a single cube. The +different time dimensions in these files can prevent Iris from concatenating +them without the intervention shown here. + +""" # noqa: D205, D212, D400 + +import matplotlib.pyplot as plt + +import iris +import iris.plot as iplt +import iris.quickplot as qplt +from iris.util import equalise_attributes, promote_aux_coord_to_dim_coord + + +def main(): + # Load the three files of sample NEMO data. + fname = iris.sample_data_path("NEMO/nemo_1m_*.nc") + cubes = iris.load(fname) + + # Some attributes are unique to each file and must be removed to allow + # concatenation. + equalise_attributes(cubes) + + # The cubes still cannot be concatenated because their dimension coordinate + # is "time_counter", which has the same value for each cube. concatenate + # needs distinct values in order to create a new DimCoord for the output + # cube. Here, each cube has a "time" auxiliary coordinate, and these do + # have distinct values, so we can promote them to allow concatenation. + for cube in cubes: + promote_aux_coord_to_dim_coord(cube, "time") + + # The cubes can now be concatenated into a single time series. + cube = cubes.concatenate_cube() + + # Generate a time series plot of a single point + plt.figure() + y_point_index = 100 + x_point_index = 100 + qplt.plot(cube[:, y_point_index, x_point_index], "o-") + + # Include the point's position in the plot's title + lat_point = cube.coord("latitude").points[y_point_index, x_point_index] + lat_string = "{:.3f}\u00b0 {}".format( + abs(lat_point), "N" if lat_point > 0.0 else "S" + ) + lon_point = cube.coord("longitude").points[y_point_index, x_point_index] + lon_string = "{:.3f}\u00b0 {}".format( + abs(lon_point), "E" if lon_point > 0.0 else "W" + ) + plt.title("{} at {} {}".format(cube.long_name.capitalize(), lat_string, lon_string)) + + iplt.show() + + +if __name__ == "__main__": + main() diff --git a/docs/gallery_code/oceanography/plot_orca_projection.py b/docs/gallery_code/oceanography/plot_orca_projection.py new file mode 100644 index 0000000000..bb68056cb3 --- /dev/null +++ b/docs/gallery_code/oceanography/plot_orca_projection.py @@ -0,0 +1,59 @@ +""" +Tri-Polar Grid Projected Plotting +================================= + +This example demonstrates cell plots of data on the semi-structured ORCA2 model +grid. + +First, the data is projected into the PlateCarree coordinate reference system. + +Second four pcolormesh plots are created from this projected dataset, +using different projections for the output image. 
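The concatenation recipe in `plot_load_nemo.py` above (equalise attributes, promote a distinct "time" aux coordinate, then concatenate) can be exercised on tiny synthetic cubes. A hedged sketch, with made-up names and values, and without the clashing "time_counter" dimension coordinate that the real NEMO files carry:

```python
import numpy as np

import iris
from iris.util import equalise_attributes, promote_aux_coord_to_dim_coord


def fake_file_cube(day, history):
    # One time step per "file", with time known only as an aux coordinate.
    time = iris.coords.AuxCoord(
        [day], standard_name="time", units="days since 2000-01-01"
    )
    return iris.cube.Cube(
        np.zeros(1),
        long_name="sst_like",
        aux_coords_and_dims=[(time, 0)],
        attributes={"history": history},
    )


cubes = iris.cube.CubeList([fake_file_cube(0, "one"), fake_file_cube(1, "two")])
equalise_attributes(cubes)  # remove the differing "history" attributes
for cube in cubes:
    promote_aux_coord_to_dim_coord(cube, "time")  # make "time" the dim coord
print(cubes.concatenate_cube().shape)  # -> (2,)
```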
+ +""" # noqa: D205, D212, D400 + +import cartopy.crs as ccrs +import matplotlib.pyplot as plt + +import iris +import iris.analysis.cartography +import iris.plot as iplt +import iris.quickplot as qplt + + +def main(): + # Load data + filepath = iris.sample_data_path("orca2_votemper.nc") + cube = iris.load_cube(filepath) + + # Choose plot projections + projections = {} + projections["Mollweide"] = ccrs.Mollweide() + projections["PlateCarree"] = ccrs.PlateCarree() + projections["NorthPolarStereo"] = ccrs.NorthPolarStereo() + projections["Orthographic"] = ccrs.Orthographic( + central_longitude=-90, central_latitude=45 + ) + + pcarree = projections["PlateCarree"] + # Transform cube to target projection + new_cube, extent = iris.analysis.cartography.project(cube, pcarree, nx=400, ny=200) + + # Plot data in each projection + for name in sorted(projections): + fig = plt.figure() + fig.suptitle("ORCA2 Data Projected to {}".format(name)) + # Set up axes and title + ax = plt.subplot(projection=projections[name]) + # Set limits + ax.set_global() + # plot with Iris quickplot pcolormesh + qplt.pcolormesh(new_cube) + # Draw coastlines + ax.coastlines() + + iplt.show() + + +if __name__ == "__main__": + main() diff --git a/docs/gallery_tests/__init__.py b/docs/gallery_tests/__init__.py new file mode 100644 index 0000000000..9468138e04 --- /dev/null +++ b/docs/gallery_tests/__init__.py @@ -0,0 +1,6 @@ +# Copyright Iris contributors +# +# This file is part of Iris and is released under the BSD license. +# See LICENSE in the root of the repository for full licensing details. + +"""Gallery Tests.""" diff --git a/docs/gallery_tests/conftest.py b/docs/gallery_tests/conftest.py new file mode 100644 index 0000000000..564a2892a2 --- /dev/null +++ b/docs/gallery_tests/conftest.py @@ -0,0 +1,67 @@ +# Copyright Iris contributors +# +# This file is part of Iris and is released under the BSD license. +# See LICENSE in the root of the repository for full licensing details. + +"""Pytest fixtures for the gallery tests.""" + +import pathlib + +import matplotlib.pyplot as plt +import pytest + +import iris + +CURRENT_DIR = pathlib.Path(__file__).resolve() +GALLERY_DIR = CURRENT_DIR.parents[1] / "gallery_code" + + +@pytest.fixture +def image_setup_teardown(): + """Perform setup and teardown fixture. + + Ensures all figures are closed before and after test to prevent one test + polluting another if it fails with a figure unclosed. + + """ + plt.close("all") + yield + plt.close("all") + + +@pytest.fixture +def import_patches(monkeypatch): + """Replace plt.show() with a function that does nothing, also add to sys.path. + + Replace plt.show() with a function that does nothing, also add all the + gallery examples to sys.path. + + """ + + def no_show(): + pass + + monkeypatch.setattr(plt, "show", no_show) + + for example_dir in GALLERY_DIR.iterdir(): + if example_dir.is_dir(): + monkeypatch.syspath_prepend(example_dir) + + yield + + +@pytest.fixture +def iris_future_defaults(): + """Create a fixture which resets all the iris.FUTURE settings to the defaults. + + Create a fixture which resets all the iris.FUTURE settings to the defaults, + as otherwise changes made in one test can affect subsequent ones. + + """ + # Run with all default settings in iris.FUTURE. + default_future_kwargs = iris.Future().__dict__.copy() + for dead_option in iris.Future.deprecated_options: + # Avoid a warning when setting these ! 
+ del default_future_kwargs[dead_option] + with iris.FUTURE.context(**default_future_kwargs): + yield diff --git a/docs/gallery_tests/test_gallery_examples.py b/docs/gallery_tests/test_gallery_examples.py new file mode 100644 index 0000000000..39e8fe0507 --- /dev/null +++ b/docs/gallery_tests/test_gallery_examples.py @@ -0,0 +1,43 @@ +# Copyright Iris contributors +# +# This file is part of Iris and is released under the BSD license. +# See LICENSE in the root of the repository for full licensing details. + +"""Test all the gallery examples.""" + +import importlib + +import matplotlib.pyplot as plt +import pytest + +from iris.tests import _RESULT_PATH +from iris.tests.graphics import check_graphic + +from .conftest import GALLERY_DIR + + +def gallery_examples(): + """Entry point for generator to yield all current gallery examples.""" + for example_file in GALLERY_DIR.glob("*/plot*.py"): + yield example_file.stem + + +@pytest.mark.filterwarnings("error::iris.IrisDeprecation") +@pytest.mark.parametrize("example", gallery_examples()) +def test_plot_example( + example, + image_setup_teardown, + import_patches, + iris_future_defaults, +): + """Test that all figures from example code match KGO.""" + module = importlib.import_module(example) + + # Run example. + module.main() + # Loop through open figures and set each to be the current figure so check_graphic + # will find it. + for fig_num in plt.get_fignums(): + plt.figure(fig_num) + image_id = f"gallery_tests.test_{example}.{fig_num - 1}" + check_graphic(image_id, _RESULT_PATH) diff --git a/docs/iris/Makefile b/docs/iris/Makefile deleted file mode 100644 index 1a66b03805..0000000000 --- a/docs/iris/Makefile +++ /dev/null @@ -1,47 +0,0 @@ -SUBDIRS = src - -html: - @for i in $(SUBDIRS); do \ - echo "make html in $$i..."; \ - (cd $$i; $(MAKE) $(MFLAGS) $(MYMAKEFLAGS) html); done - -pdf: - @for i in $(SUBDIRS); do\ - echo "make latex in $$i.."; \ - (cd $$i; $(MAKE) $(MFLAGS) $(MYMAKEFLAGS) latex); done - echo "\def\sphinxdocclass{MO_report}" > build/latex/docs.tex - echo "\documentclass[letterpaper,10pt,english]{MO_report}" >> build/latex/docs.tex - tail -n +4 build/latex/Iris.tex >> build/latex/docs.tex - sed 's/\\tableofcontents/\\tableofcontents\n\\pagenumbering\{arabic\}/' build/latex/docs.tex > build/latex/docs2.tex - sed 's/subsection{/section{/' build/latex/docs2.tex > build/latex/documentation.tex - (cd build/latex; pdflatex -interaction=scrollmode documentation.tex) - # call latex again to get page numbers right... - (cd build/latex; pdflatex -interaction=scrollmode documentation.tex); - -all: - @for i in $(SUBDIRS); do \ - echo "make all in $$i..."; \ - (cd $$i; $(MAKE) $(MFLAGS) $(MYMAKEFLAGS) all); done -install: - @for i in $(SUBDIRS); do \ - echo "Installing in $$i..."; \ - (cd $$i; $(MAKE) $(MFLAGS) $(MYMAKEFLAGS) install); done -build: - @for i in $(SUBDIRS); do \ - echo "Clearing in $$i..."; \ - (cd $$i; $(MAKE) $(MFLAGS) $(MYMAKEFLAGS) build); done -clean: - @for i in $(SUBDIRS); do \ - echo "Clearing in $$i..."; \ - (cd $$i; $(MAKE) $(MFLAGS) $(MYMAKEFLAGS) clean); done - -doctest: - @for i in $(SUBDIRS); do \ - echo "Running doctest in $$i..."; \ - (cd $$i; $(MAKE) $(MFLAGS) $(MYMAKEFLAGS) doctest); done - -extest: - @echo - @echo "Running \"example_code/graphics\" tests..." - @echo - python -m unittest discover -v -t . 
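The figure bookkeeping that `test_plot_example` relies on is worth seeing in isolation: `plt.get_fignums()` lists the numbers of all open figures, and `plt.figure(num)` re-selects an existing figure as the current one, so a "current figure" check such as `check_graphic`'s sees each figure in turn. A standalone sketch (matplotlib only, headless backend as in CI):

```python
import matplotlib

matplotlib.use("Agg")  # no display needed
import matplotlib.pyplot as plt

for _ in range(3):
    plt.figure()

print(plt.get_fignums())  # -> [1, 2, 3]
for fig_num in plt.get_fignums():
    plt.figure(fig_num)  # makes this figure current
    assert plt.gcf().number == fig_num
plt.close("all")
```

Note the test builds its image id from `fig_num - 1`, i.e. figure numbers are 1-based but the recorded images are 0-based.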
diff --git a/docs/iris/example_code/General/SOI_filtering.py b/docs/iris/example_code/General/SOI_filtering.py deleted file mode 100644 index a488b5865e..0000000000 --- a/docs/iris/example_code/General/SOI_filtering.py +++ /dev/null @@ -1,96 +0,0 @@ -""" -Applying a filter to a time-series -================================== - -This example demonstrates low pass filtering a time-series by applying a -weighted running mean over the time dimension. - -The time-series used is the Darwin-only Southern Oscillation index (SOI), -which is filtered using two different Lanczos filters, one to filter out -time-scales of less than two years and one to filter out time-scales of -less than 7 years. - -References ----------- - - Duchon C. E. (1979) Lanczos Filtering in One and Two Dimensions. - Journal of Applied Meteorology, Vol 18, pp 1016-1022. - - Trenberth K. E. (1984) Signal Versus Noise in the Southern Oscillation. - Monthly Weather Review, Vol 112, pp 326-332 - -""" -import numpy as np -import matplotlib.pyplot as plt -import iris -import iris.plot as iplt - - -def low_pass_weights(window, cutoff): - """Calculate weights for a low pass Lanczos filter. - - Args: - - window: int - The length of the filter window. - - cutoff: float - The cutoff frequency in inverse time steps. - - """ - order = ((window - 1) // 2) + 1 - nwts = 2 * order + 1 - w = np.zeros([nwts]) - n = nwts // 2 - w[n] = 2 * cutoff - k = np.arange(1., n) - sigma = np.sin(np.pi * k / n) * n / (np.pi * k) - firstfactor = np.sin(2. * np.pi * cutoff * k) / (np.pi * k) - w[n-1:0:-1] = firstfactor * sigma - w[n+1:-1] = firstfactor * sigma - return w[1:-1] - - -def main(): - # Load the monthly-valued Southern Oscillation Index (SOI) time-series. - fname = iris.sample_data_path('SOI_Darwin.nc') - soi = iris.load_cube(fname) - - # Window length for filters. - window = 121 - - # Construct 2-year (24-month) and 7-year (84-month) low pass filters - # for the SOI data which is monthly. - wgts24 = low_pass_weights(window, 1. / 24.) - wgts84 = low_pass_weights(window, 1. / 84.) - - # Apply each filter using the rolling_window method used with the weights - # keyword argument. A weighted sum is required because the magnitude of - # the weights are just as important as their relative sizes. - soi24 = soi.rolling_window('time', - iris.analysis.SUM, - len(wgts24), - weights=wgts24) - soi84 = soi.rolling_window('time', - iris.analysis.SUM, - len(wgts84), - weights=wgts84) - - # Plot the SOI time series and both filtered versions. 
- plt.figure(figsize=(9, 4)) - iplt.plot(soi, color='0.7', linewidth=1., linestyle='-', - alpha=1., label='no filter') - iplt.plot(soi24, color='b', linewidth=2., linestyle='-', - alpha=.7, label='2-year filter') - iplt.plot(soi84, color='r', linewidth=2., linestyle='-', - alpha=.7, label='7-year filter') - plt.ylim([-4, 4]) - plt.title('Southern Oscillation Index (Darwin Only)') - plt.xlabel('Time') - plt.ylabel('SOI') - plt.legend(fontsize=10) - iplt.show() - - -if __name__ == '__main__': - main() diff --git a/docs/iris/example_code/General/__init__.py b/docs/iris/example_code/General/__init__.py deleted file mode 100644 index f67741cf37..0000000000 --- a/docs/iris/example_code/General/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -""" -General visualisation examples -============================== -""" diff --git a/docs/iris/example_code/General/anomaly_log_colouring.py b/docs/iris/example_code/General/anomaly_log_colouring.py deleted file mode 100644 index d3f71b6ddc..0000000000 --- a/docs/iris/example_code/General/anomaly_log_colouring.py +++ /dev/null @@ -1,107 +0,0 @@ -""" -Colouring anomaly data with logarithmic scaling -=============================================== - -In this example, we need to plot anomaly data where the values have a -"logarithmic" significance -- i.e. we want to give approximately equal ranges -of colour between data values of, say, 1 and 10 as between 10 and 100. - -As the data range also contains zero, that obviously does not suit a simple -logarithmic interpretation. However, values of less than a certain absolute -magnitude may be considered "not significant", so we put these into a separate -"zero band" which is plotted in white. - -To do this, we create a custom value mapping function (normalization) using -the matplotlib Norm class `matplotlib.colours.SymLogNorm -`_. -We use this to make a cell-filled pseudocolour plot with a colorbar. - -NOTE: By "pseudocolour", we mean that each data point is drawn as a "cell" -region on the plot, coloured according to its data value. -This is provided in Iris by the functions :meth:`iris.plot.pcolor` and -:meth:`iris.plot.pcolormesh`, which call the underlying matplotlib -functions of the same names (i.e. `matplotlib.pyplot.pcolor -`_ -and `matplotlib.pyplot.pcolormesh -`_). -See also: http://en.wikipedia.org/wiki/False_color#Pseudocolor. - -""" -import cartopy.crs as ccrs -import iris -import iris.coord_categorisation -import iris.plot as iplt -import matplotlib.pyplot as plt -import matplotlib.colors as mcols - - -def main(): - # Load a sample air temperatures sequence. - file_path = iris.sample_data_path('E1_north_america.nc') - temperatures = iris.load_cube(file_path) - - # Create a year-number coordinate from the time information. - iris.coord_categorisation.add_year(temperatures, 'time') - - # Create a sample anomaly field for one chosen year, by extracting that - # year and subtracting the time mean. - sample_year = 1982 - year_temperature = temperatures.extract(iris.Constraint(year=sample_year)) - time_mean = temperatures.collapsed('time', iris.analysis.MEAN) - anomaly = year_temperature - time_mean - - # Construct a plot title string explaining which years are involved. - years = temperatures.coord('year').points - plot_title = 'Temperature anomaly' - plot_title += '\n{} differences from {}-{} average.'.format( - sample_year, years[0], years[-1]) - - # Define scaling levels for the logarithmic colouring. 
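The (removed) anomaly example builds a `SymLogNorm` from the levels defined next. For reference, a standalone sketch of that normalisation's behaviour; the numbers mirror the example's levels but the `base` and `linscale` values here are just safe illustrative choices:

```python
import matplotlib.colors as mcols

# linthresh sets the half-width of the linear "zero band".
norm = mcols.SymLogNorm(linthresh=0.1, linscale=1.0, vmin=-3.0, vmax=3.0, base=10)
for value in (-3.0, -0.05, 0.0, 0.05, 3.0):
    print(value, float(norm(value)))
# vmin maps to 0.0, vmax to 1.0, and values inside +/-linthresh cluster
# around the 0.5 midpoint of a diverging colormap.
```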
- minimum_log_level = 0.1 - maximum_scale_level = 3.0 - - # Use a standard colour map which varies blue-white-red. - # For suitable options, see the 'Diverging colormaps' section in: - # http://matplotlib.org/examples/color/colormaps_reference.html - anom_cmap = 'bwr' - - # Create a 'logarithmic' data normalization. - anom_norm = mcols.SymLogNorm(linthresh=minimum_log_level, - linscale=0, - vmin=-maximum_scale_level, - vmax=maximum_scale_level) - # Setting "linthresh=minimum_log_level" makes its non-logarithmic - # data range equal to our 'zero band'. - # Setting "linscale=0" maps the whole zero band to the middle colour value - # (i.e. 0.5), which is the neutral point of a "diverging" style colormap. - - # Create an Axes, specifying the map projection. - plt.axes(projection=ccrs.LambertConformal()) - - # Make a pseudocolour plot using this colour scheme. - mesh = iplt.pcolormesh(anomaly, cmap=anom_cmap, norm=anom_norm) - - # Add a colourbar, with extensions to show handling of out-of-range values. - bar = plt.colorbar(mesh, orientation='horizontal', extend='both') - - # Set some suitable fixed "logarithmic" colourbar tick positions. - tick_levels = [-3, -1, -0.3, 0.0, 0.3, 1, 3] - bar.set_ticks(tick_levels) - - # Modify the tick labels so that the centre one shows "+/-". - tick_levels[3] = r'$\pm${:g}'.format(minimum_log_level) - bar.set_ticklabels(tick_levels) - - # Label the colourbar to show the units. - bar.set_label('[{}, log scale]'.format(anomaly.units)) - - # Add coastlines and a title. - plt.gca().coastlines() - plt.title(plot_title) - - # Display the result. - iplt.show() - - -if __name__ == '__main__': - main() diff --git a/docs/iris/example_code/General/coriolis_plot.py b/docs/iris/example_code/General/coriolis_plot.py deleted file mode 100644 index 273faf6470..0000000000 --- a/docs/iris/example_code/General/coriolis_plot.py +++ /dev/null @@ -1,79 +0,0 @@ -""" -Deriving the Coriolis frequency over the globe -============================================== - -This code computes the Coriolis frequency and stores it in a cube with -associated metadata. It then plots the Coriolis frequency on an orthographic -projection. - -""" - -import cartopy.crs as ccrs -import iris -from iris.coord_systems import GeogCS -import iris.plot as iplt -import matplotlib.pyplot as plt -import numpy as np - - -def main(): - # Start with arrays for latitudes and longitudes, with a given number of - # coordinates in the arrays. - coordinate_points = 200 - longitudes = np.linspace(-180.0, 180.0, coordinate_points) - latitudes = np.linspace(-90.0, 90.0, coordinate_points) - lon2d, lat2d = np.meshgrid(longitudes, latitudes) - - # Omega is the Earth's rotation rate, expressed in radians per second - omega = 7.29e-5 - - # The data for our cube is the Coriolis frequency, - # `f = 2 * omega * sin(phi)`, which is computed for each grid point over - # the globe from the 2-dimensional latitude array. - data = 2. * omega * np.sin(np.deg2rad(lat2d)) - - # We now need to define a coordinate system for the plot. - # Here we'll use GeogCS; 6371229 is the radius of the Earth in metres. - cs = GeogCS(6371229) - - # The Iris coords module turns the latitude list into a coordinate array. - # Coords then applies an appropriate standard name and unit to it. - lat_coord = iris.coords.DimCoord(latitudes, - standard_name='latitude', - units='degrees', - coord_system=cs) - - # The above process is repeated for the longitude coordinates. 
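The longitude coordinate is built the same way, after which the (removed) Coriolis example calls `guess_bounds()` on both coordinates. For regularly spaced points that method simply places bounds at the midpoints, as this standalone sketch shows:

```python
from iris.coords import DimCoord

lat = DimCoord([0.0, 10.0, 20.0], standard_name="latitude", units="degrees")
lat.guess_bounds()  # default bound position: halfway between points
print(lat.bounds)   # -> [[-5. 5.], [5. 15.], [15. 25.]]
```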
- lon_coord = iris.coords.DimCoord(longitudes, - standard_name='longitude', - units='degrees', - coord_system=cs) - - # Now we add bounds to our latitude and longitude coordinates. - # We want simple, contiguous bounds for our regularly-spaced coordinate - # points so we use the guess_bounds() method of the coordinate. For more - # complex coordinates, we could derive and set the bounds manually. - lat_coord.guess_bounds() - lon_coord.guess_bounds() - - # Now we input our data array into the cube. - new_cube = iris.cube.Cube(data, - standard_name='coriolis_parameter', - units='s-1', - dim_coords_and_dims=[(lat_coord, 0), - (lon_coord, 1)]) - - # Now let's plot our cube, along with coastlines, a title and an - # appropriately-labelled colour bar: - ax = plt.axes(projection=ccrs.Orthographic()) - ax.coastlines(resolution='10m') - mesh = iplt.pcolormesh(new_cube, cmap='seismic') - tick_levels = [-0.00012, -0.00006, 0.0, 0.00006, 0.00012] - plt.colorbar(mesh, orientation='horizontal', label='s-1', - ticks=tick_levels, format='%.1e') - plt.title('Coriolis frequency') - plt.show() - - -if __name__ == '__main__': - main() diff --git a/docs/iris/example_code/General/cross_section.py b/docs/iris/example_code/General/cross_section.py deleted file mode 100644 index e0d05fb230..0000000000 --- a/docs/iris/example_code/General/cross_section.py +++ /dev/null @@ -1,42 +0,0 @@ -""" -Cross section plots -=================== - -This example demonstrates contour plots of a cross-sectioned multi-dimensional -cube which features a hybrid height vertical coordinate system. - -""" - -import matplotlib.pyplot as plt - -import iris -import iris.plot as iplt -import iris.quickplot as qplt - - -def main(): - # Load some test data. - fname = iris.sample_data_path('hybrid_height.nc') - theta = iris.load_cube(fname, 'air_potential_temperature') - - # Extract a single height vs longitude cross-section. N.B. This could - # easily be changed to extract a specific slice, or even to loop over *all* - # cross section slices. - cross_section = next(theta.slices(['grid_longitude', - 'model_level_number'])) - - qplt.contourf(cross_section, coords=['grid_longitude', 'altitude'], - cmap='RdBu_r') - iplt.show() - - # Now do the equivalent plot, only against model level - plt.figure() - - qplt.contourf(cross_section, - coords=['grid_longitude', 'model_level_number'], - cmap='RdBu_r') - iplt.show() - - -if __name__ == '__main__': - main() diff --git a/docs/iris/example_code/General/custom_aggregation.py b/docs/iris/example_code/General/custom_aggregation.py deleted file mode 100644 index d8df506469..0000000000 --- a/docs/iris/example_code/General/custom_aggregation.py +++ /dev/null @@ -1,94 +0,0 @@ -""" -Calculating a custom statistic -============================== - -This example shows how to define and use a custom -:class:`iris.analysis.Aggregator`, that provides a new statistical operator for -use with cube aggregation functions such as :meth:`~iris.cube.Cube.collapsed`, -:meth:`~iris.cube.Cube.aggregated_by` or -:meth:`~iris.cube.Cube.rolling_window`. - -In this case, we have a 240-year sequence of yearly average surface temperature -over North America, and we want to calculate in how many years these exceed a -certain temperature over a spell of 5 years or more. 
- -""" -import matplotlib.pyplot as plt -import numpy as np - -import iris -from iris.analysis import Aggregator -import iris.plot as iplt -import iris.quickplot as qplt -from iris.util import rolling_window - - -# Define a function to perform the custom statistical operation. -# Note: in order to meet the requirements of iris.analysis.Aggregator, it must -# do the calculation over an arbitrary (given) data axis. -def count_spells(data, threshold, axis, spell_length): - """ - Function to calculate the number of points in a sequence where the value - has exceeded a threshold value for at least a certain number of timepoints. - - Generalised to operate on multiple time sequences arranged on a specific - axis of a multidimensional array. - - Args: - - * data (array): - raw data to be compared with value threshold. - - * threshold (float): - threshold point for 'significant' datapoints. - - * axis (int): - number of the array dimension mapping the time sequences. - (Can also be negative, e.g. '-1' means last dimension) - - * spell_length (int): - number of consecutive times at which value > threshold to "count". - - """ - if axis < 0: - # just cope with negative axis numbers - axis += data.ndim - # Threshold the data to find the 'significant' points. - data_hits = data > threshold - # Make an array with data values "windowed" along the time axis. - hit_windows = rolling_window(data_hits, window=spell_length, axis=axis) - # Find the windows "full of True-s" (along the added 'window axis'). - full_windows = np.all(hit_windows, axis=axis+1) - # Count points fulfilling the condition (along the time axis). - spell_point_counts = np.sum(full_windows, axis=axis, dtype=int) - return spell_point_counts - - -def main(): - # Load the whole time-sequence as a single cube. - file_path = iris.sample_data_path('E1_north_america.nc') - cube = iris.load_cube(file_path) - - # Make an aggregator from the user function. - SPELL_COUNT = Aggregator('spell_count', - count_spells, - units_func=lambda units: 1) - - # Define the parameters of the test. - threshold_temperature = 280.0 - spell_years = 5 - - # Calculate the statistic. - warm_periods = cube.collapsed('time', SPELL_COUNT, - threshold=threshold_temperature, - spell_length=spell_years) - warm_periods.rename('Number of 5-year warm spells in 240 years') - - # Plot the results. - qplt.contourf(warm_periods, cmap='RdYlBu_r') - plt.gca().coastlines() - iplt.show() - - -if __name__ == '__main__': - main() diff --git a/docs/iris/example_code/General/custom_file_loading.py b/docs/iris/example_code/General/custom_file_loading.py deleted file mode 100644 index d6fddf8464..0000000000 --- a/docs/iris/example_code/General/custom_file_loading.py +++ /dev/null @@ -1,317 +0,0 @@ -""" -Loading a cube from a custom file format -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -This example shows how a custom text file can be loaded using the standard Iris -load mechanism. - -The first stage in the process is to define an Iris :class:`FormatSpecification -` for the file format. To create a -format specification we need to define the following: - -* format_name - Some text that describes the format specification we are - creating -* file_element - FileElement object describing the element which identifies - this FormatSpecification. - - Possible values are: - - ``iris.io.format_picker.MagicNumber(n, o)`` - The n bytes from the file at offset o. - - ``iris.io.format_picker.FileExtension()`` - The file's extension. 
- - ``iris.io.format_picker.LeadingLine()`` - The first line of the file. - -* file_element_value - The value that the file_element should take if a file - matches this FormatSpecification -* handler (optional) - A generator function that will be called when the file - specification has been identified. This function is provided by the user and - provides the means to parse the whole file. If no handler function is - provided, then identification is still possible without any handling. - - The handler function must define the following arguments: - - * list of filenames to process - * callback function - An optional function to filter/alter the Iris cubes - returned - - The handler function must be defined as generator which yields each cube as - they are produced. - -* priority (optional) - Integer giving a priority for considering this - specification where higher priority means sooner consideration - -In the following example, the function :func:`load_NAME_III` has been defined -to handle the loading of the raw data from the custom file format. This -function is called from :func:`NAME_to_cube` which uses this data to create and -yield Iris cubes. - -In the ``main()`` function the filenames are loaded via the ``iris.load_cube`` -function which automatically invokes the ``FormatSpecification`` we defined. -The cube returned from the load function is then used to produce a plot. - -""" -import datetime - -import matplotlib.pyplot as plt -import numpy as np - -from cf_units import Unit, CALENDAR_GREGORIAN - -import iris -import iris.coords as icoords -import iris.coord_systems as icoord_systems -import iris.fileformats -import iris.io.format_picker as format_picker -import iris.plot as iplt - - -UTC_format = '%H%M%Z %d/%m/%Y' - -FLOAT_HEADERS = ['X grid origin', 'Y grid origin', - 'X grid resolution', 'Y grid resolution'] -INT_HEADERS = ['X grid size', 'Y grid size', 'Number of fields'] -DATE_HEADERS = ['Run time', 'Start of release', 'End of release'] -COLUMN_NAMES = ['species_category', 'species', 'cell_measure', 'quantity', - 'unit', 'z_level', 'time'] - - -def load_NAME_III(filename): - """ - Loads the Met Office's NAME III grid output files returning headers, column - definitions and data arrays as 3 separate lists. - - """ - - # Loading a file gives a generator of lines which can be progressed using - # the next() function. This will come in handy as we wish to progress - # through the file line by line. - with open(filename) as file_handle: - # Define a dictionary which can hold the header metadata for this file. - headers = {} - - # Skip the NAME header of the file which looks something like - # 'NAME III (version X.X.X)'. - next(file_handle) - - # Read the next 16 lines of header information, putting the form - # "header name: header value" into a dictionary. - for _ in range(16): - header_name, header_value = next(file_handle).split(':') - - # Strip off any spurious space characters in the header name and - # value. - header_name = header_name.strip() - header_value = header_value.strip() - - # Cast some headers into floats or integers if they match a given - # header name. - if header_name in FLOAT_HEADERS: - header_value = float(header_value) - elif header_name in INT_HEADERS: - header_value = int(header_value) - elif header_name in DATE_HEADERS: - # convert the time to python datetimes - header_value = datetime.datetime.strptime(header_value, - UTC_format) - - headers[header_name] = header_value - - # Skip the next blank line in the file. 
- next(file_handle) - - # Read the next 7 lines of column definitions. - column_headings = {} - for column_header_name in COLUMN_NAMES: - column_headings[column_header_name] = [ - col.strip() for col in next(file_handle).split(',') - ][:-1] - - # Convert the time to python datetimes. - new_time_column_header = [] - for i, t in enumerate(column_headings['time']): - # The first 4 columns aren't time at all, so don't convert them to - # datetimes. - if i >= 4: - t = datetime.datetime.strptime(t, UTC_format) - new_time_column_header.append(t) - column_headings['time'] = new_time_column_header - - # Skip the blank line after the column headers. - next(file_handle) - - # Make a list of data arrays to hold the data for each column. - data_shape = (headers['Y grid size'], headers['X grid size']) - data_arrays = [np.zeros(data_shape, dtype=np.float32) - for i in range(headers['Number of fields'])] - - # Iterate over the remaining lines which represent the data in a column - # form. - for line in file_handle: - # Split the line by comma, removing the last empty column caused by - # the trailing comma. - vals = line.split(',')[:-1] - - # Cast the x and y grid positions to floats and convert them to - # zero based indices (the numbers are 1 based grid positions where - # 0.5 represents half a grid point.) - x = int(float(vals[0]) - 1.5) - y = int(float(vals[1]) - 1.5) - - # Populate the data arrays (i.e. all columns but the leading 4). - for i, data_array in enumerate(data_arrays): - data_array[y, x] = float(vals[i + 4]) - - return headers, column_headings, data_arrays - - -def NAME_to_cube(filenames, callback): - """ - Returns a generator of cubes given a list of filenames and a callback. - """ - - for filename in filenames: - header, column_headings, data_arrays = load_NAME_III(filename) - - for i, data_array in enumerate(data_arrays): - # turn the dictionary of column headers with a list of header - # information for each field into a dictionary of headers for just - # this field. Ignore the first 4 columns of grid position (data was - # located with the data array). - field_headings = dict((k, v[i + 4]) - for k, v in column_headings.items()) - - # make an cube - cube = iris.cube.Cube(data_array) - - # define the name and unit - name = ('%s %s' % (field_headings['species'], - field_headings['quantity'])) - name = name.upper().replace(' ', '_') - cube.rename(name) - # Some units are badly encoded in the file, fix this by putting a - # space in between. (if gs is not found, then the string will be - # returned unchanged) - cube.units = field_headings['unit'].replace('gs', 'g s') - - # define and add the singular coordinates of the field (flight - # level, time etc.) 
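Later in this (removed) loader, the parsed datetimes are serialised onto a time coordinate with cf_units. A standalone sketch of that step; note that current cf_units prefers the calendar name "standard" where this older code used `CALENDAR_GREGORIAN`:

```python
from datetime import datetime

from cf_units import Unit

time_unit = Unit("hours since epoch", calendar="standard")
# date2num turns a datetime into a float in the unit's reference frame.
print(float(time_unit.date2num(datetime(2011, 7, 15))))
```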
- cube.add_aux_coord(icoords.AuxCoord(field_headings['z_level'], - long_name='flight_level', - units='1')) - - # define the time unit and use it to serialise the datetime for the - # time coordinate - time_unit = Unit('hours since epoch', calendar=CALENDAR_GREGORIAN) - time_coord = icoords.AuxCoord( - time_unit.date2num(field_headings['time']), - standard_name='time', - units=time_unit) - cube.add_aux_coord(time_coord) - - # build a coordinate system which can be referenced by latitude and - # longitude coordinates - lat_lon_coord_system = icoord_systems.GeogCS(6371229) - - # build regular latitude and longitude coordinates which have - # bounds - start = header['X grid origin'] + header['X grid resolution'] - step = header['X grid resolution'] - count = header['X grid size'] - pts = start + np.arange(count, dtype=np.float32) * step - lon_coord = icoords.DimCoord(pts, standard_name='longitude', - units='degrees', - coord_system=lat_lon_coord_system) - lon_coord.guess_bounds() - - start = header['Y grid origin'] + header['Y grid resolution'] - step = header['Y grid resolution'] - count = header['Y grid size'] - pts = start + np.arange(count, dtype=np.float32) * step - lat_coord = icoords.DimCoord(pts, standard_name='latitude', - units='degrees', - coord_system=lat_lon_coord_system) - lat_coord.guess_bounds() - - # add the latitude and longitude coordinates to the cube, with - # mappings to data dimensions - cube.add_dim_coord(lat_coord, 0) - cube.add_dim_coord(lon_coord, 1) - - # implement standard iris callback capability. Although callbacks - # are not used in this example, the standard mechanism for a custom - # loader to implement a callback is shown: - cube = iris.io.run_callback(callback, cube, - [header, field_headings, data_array], - filename) - - # yield the cube created (the loop will continue when the next() - # element is requested) - yield cube - - -# Create a format_picker specification of the NAME file format giving it a -# priority greater than the built in NAME loader. 
-_NAME_III_spec = format_picker.FormatSpecification( - 'Name III', - format_picker.LeadingLine(), - lambda line: line.startswith(b"NAME III"), - NAME_to_cube, - priority=6) - -# Register the NAME loader with iris -iris.fileformats.FORMAT_AGENT.add_spec(_NAME_III_spec) - - -# --------------------------------------------- -# | Using the new loader | -# --------------------------------------------- - -def main(): - fname = iris.sample_data_path('NAME_output.txt') - - boundary_volc_ash_constraint = iris.Constraint( - 'VOLCANIC_ASH_AIR_CONCENTRATION', - flight_level='From FL000 - FL200') - - # Callback shown as None to illustrate where a cube-level callback function - # would be used if required - cube = iris.load_cube(fname, boundary_volc_ash_constraint, callback=None) - - # draw contour levels for the data (the top level is just a catch-all) - levels = (0.0002, 0.002, 0.004, 1e10) - cs = iplt.contourf(cube, levels=levels, - colors=('#80ffff', '#939598', '#e00404'), - ) - - # draw a black outline at the lowest contour to highlight affected areas - iplt.contour(cube, levels=(levels[0], 100), - colors='black') - - # set an extent and a background image for the map - ax = plt.gca() - ax.set_extent((-90, 20, 20, 75)) - ax.stock_img('ne_shaded') - - # make a legend, with custom labels, for the coloured contour set - artists, _ = cs.legend_elements() - labels = [ - r'$%s < x \leq %s$' % (levels[0], levels[1]), - r'$%s < x \leq %s$' % (levels[1], levels[2]), - r'$x > %s$' % levels[2] - ] - ax.legend(artists, labels, title='Ash concentration / g m-3', - loc='upper left') - - time = cube.coord('time') - time_date = time.units.num2date(time.points[0]).strftime(UTC_format) - plt.title('Volcanic ash concentration forecast\nvalid at %s' % time_date) - - iplt.show() - - -if __name__ == '__main__': - main() diff --git a/docs/iris/example_code/General/global_map.py b/docs/iris/example_code/General/global_map.py deleted file mode 100644 index 4ed8b97443..0000000000 --- a/docs/iris/example_code/General/global_map.py +++ /dev/null @@ -1,35 +0,0 @@ -""" -Quickplot of a 2d cube on a map -=============================== - -This example demonstrates a contour plot of global air temperature. The plot -title and the labels for the axes are automatically derived from the metadata. - -""" -import cartopy.crs as ccrs -import matplotlib.pyplot as plt - -import iris -import iris.plot as iplt -import iris.quickplot as qplt - - -def main(): - fname = iris.sample_data_path('air_temp.pp') - temperature = iris.load_cube(fname) - - # Plot #1: contourf with axes longitude from -180 to 180 - plt.figure(figsize=(12, 5)) - plt.subplot(121) - qplt.contourf(temperature, 15) - plt.gca().coastlines() - - # Plot #2: contourf with axes longitude from 0 to 360 - proj = ccrs.PlateCarree(central_longitude=-180.0) - plt.subplot(122, projection=proj) - qplt.contourf(temperature, 15) - plt.gca().coastlines() - iplt.show() - -if __name__ == '__main__': - main() diff --git a/docs/iris/example_code/General/inset_plot.py b/docs/iris/example_code/General/inset_plot.py deleted file mode 100644 index f2ae2d1155..0000000000 --- a/docs/iris/example_code/General/inset_plot.py +++ /dev/null @@ -1,66 +0,0 @@ -""" -Test Data Showing Inset Plots -============================= - -This example demonstrates the use of a single 3D data cube with time, latitude -and longitude dimensions to plot a temperature series for a single latitude -coordinate, with an inset plot of the data region. 
- -""" - -import matplotlib.pyplot as plt -import numpy as np -import iris -import cartopy.crs as ccrs -import iris.quickplot as qplt -import iris.plot as iplt - - -def main(): - cube1 = iris.load_cube(iris.sample_data_path('ostia_monthly.nc')) - # Slice into cube to retrieve data for the inset map showing the - # data region - region = cube1[-1, :, :] - # Average over latitude to reduce cube to 1 dimension - plot_line = region.collapsed('latitude', iris.analysis.MEAN) - - # Open a window for plotting - fig = plt.figure() - # Add a single subplot (axes). Could also use "ax_main = plt.subplot()" - ax_main = fig.add_subplot(1, 1, 1) - # Produce a quick plot of the 1D cube - qplt.plot(plot_line) - - # Set x limits to match the data - ax_main.set_xlim(0, plot_line.coord('longitude').points.max()) - # Adjust the y limits so that the inset map won't clash with main plot - ax_main.set_ylim(294, 310) - ax_main.set_title('Meridional Mean Temperature') - # Add grid lines - ax_main.grid() - - # Add a second set of axes specifying the fractional coordinates within - # the figure with bottom left corner at x=0.55, y=0.58 with width - # 0.3 and height 0.25. - # Also specify the projection - ax_sub = fig.add_axes([0.55, 0.58, 0.3, 0.25], - projection=ccrs.Mollweide(central_longitude=180)) - - # Use iris.plot (iplt) here so colour bar properties can be specified - # Also use a sequential colour scheme to reduce confusion for those with - # colour-blindness - iplt.pcolormesh(region, cmap='Blues') - # Manually set the orientation and tick marks on your colour bar - ticklist = np.linspace(np.min(region.data), np.max(region.data), 4) - plt.colorbar(orientation='horizontal', ticks=ticklist) - ax_sub.set_title('Data Region') - # Add coastlines - ax_sub.coastlines() - # request to show entire map, using the colour mesh on the data region only - ax_sub.set_global() - - qplt.show() - - -if __name__ == '__main__': - main() diff --git a/docs/iris/example_code/General/lineplot_with_legend.py b/docs/iris/example_code/General/lineplot_with_legend.py deleted file mode 100644 index 4ce80a9569..0000000000 --- a/docs/iris/example_code/General/lineplot_with_legend.py +++ /dev/null @@ -1,47 +0,0 @@ -""" -Multi-line temperature profile plot -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -""" -import matplotlib.pyplot as plt - -import iris -import iris.plot as iplt -import iris.quickplot as qplt - - -def main(): - fname = iris.sample_data_path('air_temp.pp') - - # Load exactly one cube from the given file. - temperature = iris.load_cube(fname) - - # We only want a small number of latitudes, so filter some out - # using "extract". - temperature = temperature.extract( - iris.Constraint(latitude=lambda cell: 68 <= cell < 78)) - - for cube in temperature.slices('longitude'): - - # Create a string label to identify this cube (i.e. latitude: value). - cube_label = 'latitude: %s' % cube.coord('latitude').points[0] - - # Plot the cube, and associate it with a label. - qplt.plot(cube, label=cube_label) - - # Add the legend with 2 columns. - plt.legend(ncol=2) - - # Put a grid on the plot. - plt.grid(True) - - # Tell matplotlib not to extend the plot axes range to nicely - # rounded numbers. - plt.axis('tight') - - # Finally, show it. 
- iplt.show() - - -if __name__ == '__main__': - main() diff --git a/docs/iris/example_code/General/orca_projection.py b/docs/iris/example_code/General/orca_projection.py deleted file mode 100644 index fb44221221..0000000000 --- a/docs/iris/example_code/General/orca_projection.py +++ /dev/null @@ -1,58 +0,0 @@ -""" -Tri-Polar Grid Projected Plotting -================================= - -This example demonstrates cell plots of data on the semi-structured ORCA2 model -grid. - -First, the data is projected into the PlateCarree coordinate reference system. - -Second four pcolormesh plots are created from this projected dataset, -using different projections for the output image. - -""" - -import matplotlib.pyplot as plt - -import cartopy.crs as ccrs -import iris -import iris.analysis.cartography -import iris.plot as iplt -import iris.quickplot as qplt - - -def main(): - # Load data - filepath = iris.sample_data_path('orca2_votemper.nc') - cube = iris.load_cube(filepath) - - # Choose plot projections - projections = {} - projections['Mollweide'] = ccrs.Mollweide() - projections['PlateCarree'] = ccrs.PlateCarree() - projections['NorthPolarStereo'] = ccrs.NorthPolarStereo() - projections['Orthographic'] = ccrs.Orthographic(central_longitude=-90, - central_latitude=45) - - pcarree = projections['PlateCarree'] - # Transform cube to target projection - new_cube, extent = iris.analysis.cartography.project(cube, pcarree, - nx=400, ny=200) - - # Plot data in each projection - for name in sorted(projections): - fig = plt.figure() - fig.suptitle('ORCA2 Data Projected to {}'.format(name)) - # Set up axes and title - ax = plt.subplot(projection=projections[name]) - # Set limits - ax.set_global() - # plot with Iris quickplot pcolormesh - qplt.pcolormesh(new_cube) - # Draw coastlines - ax.coastlines() - - iplt.show() - -if __name__ == '__main__': - main() diff --git a/docs/iris/example_code/General/polar_stereo.py b/docs/iris/example_code/General/polar_stereo.py deleted file mode 100644 index f1cefdc903..0000000000 --- a/docs/iris/example_code/General/polar_stereo.py +++ /dev/null @@ -1,28 +0,0 @@ -""" -Example of a polar stereographic plot -===================================== - -Demonstrates plotting data that are defined on a polar stereographic -projection. - -""" - -import matplotlib.pyplot as plt - -import iris -import iris.plot as iplt -import iris.quickplot as qplt - - -def main(): - file_path = iris.sample_data_path('polar_stereo.grib2') - cube = iris.load_cube(file_path) - qplt.contourf(cube) - ax = plt.gca() - ax.coastlines() - ax.gridlines() - iplt.show() - - -if __name__ == '__main__': - main() diff --git a/docs/iris/example_code/General/polynomial_fit.py b/docs/iris/example_code/General/polynomial_fit.py deleted file mode 100644 index 84f3265dd1..0000000000 --- a/docs/iris/example_code/General/polynomial_fit.py +++ /dev/null @@ -1,52 +0,0 @@ -""" -Fitting a polynomial -==================== - -This example demonstrates computing a polynomial fit to 1D data from an Iris -cube, adding the fit to the cube's metadata, and plotting both the 1D data and -the fit. - -""" - -import matplotlib.pyplot as plt -import numpy as np - -import iris -import iris.quickplot as qplt - - -def main(): - # Load some test data. - fname = iris.sample_data_path('A1B_north_america.nc') - cube = iris.load_cube(fname) - - # Extract a single time series at a latitude and longitude point. - location = next(cube.slices(['time'])) - - # Calculate a polynomial fit to the data at this time series. 
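The fit performed next is plain numpy: `polyfit` returns polynomial coefficients (highest power first) and `polyval` evaluates them. A quick standalone check on an exactly quadratic series (values invented):

```python
import numpy as np

x = np.arange(10.0)
y = 2.0 + 0.5 * x + 0.1 * x**2
coeffs = np.polyfit(x, y, deg=2)
print(np.round(coeffs, 3))                    # -> [0.1 0.5 2. ]
print(np.allclose(np.polyval(coeffs, x), y))  # -> True
```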
- x_points = location.coord('time').points - y_points = location.data - degree = 2 - - p = np.polyfit(x_points, y_points, degree) - y_fitted = np.polyval(p, x_points) - - # Add the polynomial fit values to the time series to take - # full advantage of Iris plotting functionality. - long_name = 'degree_{}_polynomial_fit_of_{}'.format(degree, cube.name()) - fit = iris.coords.AuxCoord(y_fitted, long_name=long_name, - units=location.units) - location.add_aux_coord(fit, 0) - - qplt.plot(location.coord('time'), location, label='data') - qplt.plot(location.coord('time'), - location.coord(long_name), - 'g-', label='polynomial fit') - plt.legend(loc='best') - plt.title('Trend of US air temperature over time') - - qplt.show() - - -if __name__ == '__main__': - main() diff --git a/docs/iris/example_code/General/projections_and_annotations.py b/docs/iris/example_code/General/projections_and_annotations.py deleted file mode 100644 index b4cb8b1eb7..0000000000 --- a/docs/iris/example_code/General/projections_and_annotations.py +++ /dev/null @@ -1,115 +0,0 @@ -""" -Plotting in different projections -================================= - -This example shows how to overlay data and graphics in different projections, -demonstrating various features of Iris, Cartopy and matplotlib. - -We wish to overlay two datasets, defined on different rotated-pole grids. -To display both together, we make a pseudocoloured plot of the first, overlaid -with contour lines from the second. -We also add some lines and text annotations drawn in various projections. - -We plot these over a specified region, in two different map projections. - -""" -import cartopy.crs as ccrs -import iris -import iris.plot as iplt -import numpy as np -import matplotlib.pyplot as plt - - -# Define a Cartopy 'ordinary' lat-lon coordinate reference system. -crs_latlon = ccrs.PlateCarree() - - -def make_plot(projection_name, projection_crs): - - # Create a matplotlib Figure. - plt.figure() - - # Add a matplotlib Axes, specifying the required display projection. - # NOTE: specifying 'projection' (a "cartopy.crs.Projection") makes the - # resulting Axes a "cartopy.mpl.geoaxes.GeoAxes", which supports plotting - # in different coordinate systems. - ax = plt.axes(projection=projection_crs) - - # Set display limits to include a set region of latitude * longitude. - # (Note: Cartopy-specific). - ax.set_extent((-80.0, 20.0, 10.0, 80.0), crs=crs_latlon) - - # Add coastlines and meridians/parallels (Cartopy-specific). - ax.coastlines(linewidth=0.75, color='navy') - ax.gridlines(crs=crs_latlon, linestyle='-') - - # Plot the first dataset as a pseudocolour filled plot. - maindata_filepath = iris.sample_data_path('rotated_pole.nc') - main_data = iris.load_cube(maindata_filepath) - # NOTE: iplt.pcolormesh calls "pyplot.pcolormesh", passing in a coordinate - # system with the 'transform' keyword: This enables the Axes (a cartopy - # GeoAxes) to reproject the plot into the display projection. - iplt.pcolormesh(main_data, cmap='RdBu_r') - - # Overplot the other dataset (which has a different grid), as contours. - overlay_filepath = iris.sample_data_path('space_weather.nc') - overlay_data = iris.load_cube(overlay_filepath, 'total electron content') - # NOTE: as above, "iris.plot.contour" calls "pyplot.contour" with a - # 'transform' keyword, enabling Cartopy reprojection. - iplt.contour(overlay_data, 20, - linewidths=2.0, colors='darkgreen', linestyles='-') - - # Draw a margin line, some way in from the border of the 'main' data... 
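A worked example of the margin-fraction arithmetic coming next, on a made-up axis running from 10.0 to 20.0 (a 7% inset at each end gives 10.7 and 19.3):

import numpy as np

margin_fractions = np.array([0.07, 1.0 - 0.07])
# Same formula as below: start + (end - start) * fraction, at both ends.
lower, upper = 10.0 + (20.0 - 10.0) * margin_fractions
assert np.allclose([lower, upper], [10.7, 19.3])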
- # First calculate rectangle corners, 7% in from each corner of the data. - x_coord, y_coord = main_data.coord(axis='x'), main_data.coord(axis='y') - x_start, x_end = np.min(x_coord.points), np.max(x_coord.points) - y_start, y_end = np.min(y_coord.points), np.max(y_coord.points) - margin = 0.07 - margin_fractions = np.array([margin, 1.0 - margin]) - x_lower, x_upper = x_start + (x_end - x_start) * margin_fractions - y_lower, y_upper = y_start + (y_end - y_start) * margin_fractions - box_x_points = x_lower + (x_upper - x_lower) * np.array([0, 1, 1, 0, 0]) - box_y_points = y_lower + (y_upper - y_lower) * np.array([0, 0, 1, 1, 0]) - # Get the Iris coordinate system of the X coordinate (Y should be the same). - cs_data1 = x_coord.coord_system - # Construct an equivalent Cartopy coordinate reference system ("crs"). - crs_data1 = cs_data1.as_cartopy_crs() - # Draw the rectangle in this crs, with matplotlib "pyplot.plot". - # NOTE: the 'transform' keyword specifies a non-display coordinate system - # for the plot points (as used by the "iris.plot" functions). - plt.plot(box_x_points, box_y_points, transform=crs_data1, - linewidth=2.0, color='white', linestyle='--') - - # Mark some particular places with a small circle and a name label... - # Define some test points with latitude and longitude coordinates. - city_data = [('London', 51.5072, 0.1275), - ('Halifax, NS', 44.67, -63.61), - ('Reykjavik', 64.1333, -21.9333)] - # Place a single marker point and a text annotation at each place. - for name, lat, lon in city_data: - plt.plot(lon, lat, marker='o', markersize=7.0, markeredgewidth=2.5, - markerfacecolor='black', markeredgecolor='white', - transform=crs_latlon) - # NOTE: the "plt.annotate" call does not have a "transform=" keyword, - # so for this one we transform the coordinates with a Cartopy call. - at_x, at_y = ax.projection.transform_point(lon, lat, - src_crs=crs_latlon) - plt.annotate( - name, xy=(at_x, at_y), xytext=(30, 20), textcoords='offset points', - color='black', backgroundcolor='white', size='large', - arrowprops=dict(arrowstyle='->', color='white', linewidth=2.5)) - - # Add a title, and display. - plt.title('A pseudocolour plot on the {} projection,\n' - 'with overlaid contours.'.format(projection_name)) - iplt.show() - - -def main(): - # Demonstrate with two different display projections. - make_plot('Equidistant Cylindrical', ccrs.PlateCarree()) - make_plot('North Polar Stereographic', ccrs.NorthPolarStereo()) - - -if __name__ == '__main__': - main() diff --git a/docs/iris/example_code/General/rotated_pole_mapping.py b/docs/iris/example_code/General/rotated_pole_mapping.py deleted file mode 100644 index e175e6fe74..0000000000 --- a/docs/iris/example_code/General/rotated_pole_mapping.py +++ /dev/null @@ -1,64 +0,0 @@ -""" -Rotated pole mapping -===================== - -This example uses several visualisation methods to achieve an array of -differing images, including: - - * Visualisation of point based data - * Contouring of point based data - * Block plot of contiguous bounded data - * Non native projection and a Natural Earth shaded relief image underlay - -""" -import cartopy.crs as ccrs -import matplotlib.pyplot as plt - -import iris -import iris.plot as iplt -import iris.quickplot as qplt -import iris.analysis.cartography - - -def main(): - # Load some test data.
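A minimal sketch of the Natural Earth shaded-relief underlay mentioned in the docstring above (Plot #4 below uses the same calls on its GeoAxes; this assumes only that Cartopy and its bundled background image are installed):

import cartopy.crs as ccrs
import matplotlib.pyplot as plt

ax = plt.axes(projection=ccrs.PlateCarree())
ax.stock_img()    # draw the Natural Earth image bundled with Cartopy
ax.coastlines()
plt.show()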
- fname = iris.sample_data_path('rotated_pole.nc') - air_pressure = iris.load_cube(fname) - - # Plot #1: Point plot showing data values & a colorbar - plt.figure() - points = qplt.points(air_pressure, c=air_pressure.data) - cb = plt.colorbar(points, orientation='horizontal') - cb.set_label(air_pressure.units) - plt.gca().coastlines() - iplt.show() - - # Plot #2: Contourf of the point based data - plt.figure() - qplt.contourf(air_pressure, 15) - plt.gca().coastlines() - iplt.show() - - # Plot #3: Contourf overlaid by coloured point data - plt.figure() - qplt.contourf(air_pressure) - iplt.points(air_pressure, c=air_pressure.data) - plt.gca().coastlines() - iplt.show() - - # For the purposes of this example, add some bounds to the latitude - # and longitude - air_pressure.coord('grid_latitude').guess_bounds() - air_pressure.coord('grid_longitude').guess_bounds() - - # Plot #4: Block plot - plt.figure() - plt.axes(projection=ccrs.PlateCarree()) - iplt.pcolormesh(air_pressure) - plt.gca().stock_img() - plt.gca().coastlines() - iplt.show() - - -if __name__ == '__main__': - main() diff --git a/docs/iris/example_code/Meteorology/COP_1d_plot.py b/docs/iris/example_code/Meteorology/COP_1d_plot.py deleted file mode 100644 index 53bff28b7e..0000000000 --- a/docs/iris/example_code/Meteorology/COP_1d_plot.py +++ /dev/null @@ -1,108 +0,0 @@ -""" -Global average annual temperature plot -====================================== - -Produces a time-series plot of North American temperature forecasts for 2 -different emission scenarios. Constraining data to a limited spatial area also -features in this example. - -The data used comes from the HadGEM2-AO model simulations for the A1B and E1 -scenarios, both of which were derived using the IMAGE Integrated Assessment -Model (Johns et al. 2011; Lowe et al. 2009). - -References ----------- - - Johns T.C., et al. (2011) Climate change under aggressive mitigation: the - ENSEMBLES multi-model experiment. Climate Dynamics, Vol 37, No. 9-10, - doi:10.1007/s00382-011-1005-5. - - Lowe J.A., C.D. Hewitt, D.P. Van Vuuren, T.C. Johns, E. Stehfest, J-F. - Royer, and P. van der Linden, 2009. New Study For Climate Modeling, - Analyses, and Scenarios. Eos Trans. AGU, Vol 90, No. 21, - doi:10.1029/2009EO210001. - -.. seealso:: - - Further details on the aggregation functionality being used in this example - can be found in :ref:`cube-statistics`. - -""" -import numpy as np -import matplotlib.pyplot as plt -import iris -import iris.plot as iplt -import iris.quickplot as qplt - -import iris.analysis.cartography - - -def main(): - # Load data into three Cubes, one for each set of NetCDF files. - e1 = iris.load_cube(iris.sample_data_path('E1_north_america.nc')) - - a1b = iris.load_cube(iris.sample_data_path('A1B_north_america.nc')) - - # Load the global pre-industrial mean temperature, and limit the domain - # to the same North American region as e1 and a1b. - north_america = iris.Constraint(longitude=lambda v: 225 <= v <= 315, - latitude=lambda v: 15 <= v <= 60) - pre_industrial = iris.load_cube(iris.sample_data_path('pre-industrial.pp'), - north_america) - - # Generate area-weights array. As e1 and a1b are on the same grid we can - # do this just once and re-use. This method requires bounds on lat/lon - # coords, so let's add some in sensible locations using the "guess_bounds" - # method.
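What guess_bounds does, shown on a made-up coordinate: contiguous bounds are placed halfway between neighbouring points and extrapolated at the ends.

import numpy as np
import iris.coords

lat = iris.coords.DimCoord(np.array([10.0, 20.0, 30.0]),
                           standard_name='latitude', units='degrees')
lat.guess_bounds()
print(lat.bounds)  # [[ 5. 15.] [15. 25.] [25. 35.]]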
- e1.coord('latitude').guess_bounds() - e1.coord('longitude').guess_bounds() - e1_grid_areas = iris.analysis.cartography.area_weights(e1) - pre_industrial.coord('latitude').guess_bounds() - pre_industrial.coord('longitude').guess_bounds() - pre_grid_areas = iris.analysis.cartography.area_weights(pre_industrial) - - # Perform the area-weighted mean for each of the datasets using the - # computed grid-box areas. - pre_industrial_mean = pre_industrial.collapsed(['latitude', 'longitude'], - iris.analysis.MEAN, - weights=pre_grid_areas) - e1_mean = e1.collapsed(['latitude', 'longitude'], - iris.analysis.MEAN, - weights=e1_grid_areas) - a1b_mean = a1b.collapsed(['latitude', 'longitude'], - iris.analysis.MEAN, - weights=e1_grid_areas) - - # Plot the datasets - qplt.plot(e1_mean, label='E1 scenario', lw=1.5, color='blue') - qplt.plot(a1b_mean, label='A1B-Image scenario', lw=1.5, color='red') - - # Draw a horizontal line showing the pre-industrial mean - plt.axhline(y=pre_industrial_mean.data, color='gray', linestyle='dashed', - label='pre-industrial', lw=1.5) - - # Constrain the period 1860-1999 and extract the observed data from a1b - constraint = iris.Constraint(time=lambda - cell: 1860 <= cell.point.year <= 1999) - observed = a1b_mean.extract(constraint) - # Assert that this data set is the same as the e1 scenario: - # they share data up to the 1999 cut off. - assert np.all(np.isclose(observed.data, - e1_mean.extract(constraint).data)) - - # Plot the observed data - qplt.plot(observed, label='observed', color='black', lw=1.5) - - # Add a legend and title - plt.legend(loc="upper left") - plt.title('North American mean air temperature', fontsize=18) - - plt.xlabel('Time / year') - - plt.grid() - - iplt.show() - - -if __name__ == '__main__': - main() diff --git a/docs/iris/example_code/Meteorology/COP_maps.py b/docs/iris/example_code/Meteorology/COP_maps.py deleted file mode 100644 index aa5049feb9..0000000000 --- a/docs/iris/example_code/Meteorology/COP_maps.py +++ /dev/null @@ -1,149 +0,0 @@ -""" -Global average annual temperature maps -====================================== - -Produces maps of global temperature forecasts from the A1B and E1 scenarios. - -The data used comes from the HadGEM2-AO model simulations for the A1B and E1 -scenarios, both of which were derived using the IMAGE Integrated Assessment -Model (Johns et al. 2011; Lowe et al. 2009). - -References ----------- - - Johns T.C., et al. (2011) Climate change under aggressive mitigation: the - ENSEMBLES multi-model experiment. Climate Dynamics, Vol 37, No. 9-10, - doi:10.1007/s00382-011-1005-5. - - Lowe J.A., C.D. Hewitt, D.P. Van Vuuren, T.C. Johns, E. Stehfest, J-F. - Royer, and P. van der Linden, 2009. New Study For Climate Modeling, - Analyses, and Scenarios. Eos Trans. AGU, Vol 90, No. 21, - doi:10.1029/2009EO210001. - -""" -from six.moves import zip - -import os.path -import matplotlib.pyplot as plt -import numpy as np - -import iris -import iris.coords as coords -import iris.plot as iplt - - -def cop_metadata_callback(cube, field, filename): - """ - A function which adds an "Experiment" coordinate which comes from the - filename. 
- """ - - # Extract the experiment name (such as a1b or e1) from the filename (in - # this case it is just the parent folder's name) - containing_folder = os.path.dirname(filename) - experiment_label = os.path.basename(containing_folder) - - # Create a coordinate with the experiment label in it - exp_coord = coords.AuxCoord(experiment_label, long_name='Experiment', - units='no_unit') - - # and add it to the cube - cube.add_aux_coord(exp_coord) - - -def main(): - # Load e1 and a1 using the callback to update the metadata - e1 = iris.load_cube(iris.sample_data_path('E1.2098.pp'), - callback=cop_metadata_callback) - a1b = iris.load_cube(iris.sample_data_path('A1B.2098.pp'), - callback=cop_metadata_callback) - - # Load the global average data and add an 'Experiment' coord it - global_avg = iris.load_cube(iris.sample_data_path('pre-industrial.pp')) - - # Define evenly spaced contour levels: -2.5, -1.5, ... 15.5, 16.5 with the - # specific colours - levels = np.arange(20) - 2.5 - red = np.array([0, 0, 221, 239, 229, 217, 239, 234, 228, 222, 205, 196, - 161, 137, 116, 89, 77, 60, 51]) / 256. - green = np.array([16, 217, 242, 243, 235, 225, 190, 160, 128, 87, 72, 59, - 33, 21, 29, 30, 30, 29, 26]) / 256. - blue = np.array([255, 255, 243, 169, 99, 51, 63, 37, 39, 21, 27, 23, 22, - 26, 29, 28, 27, 25, 22]) / 256. - - # Put those colours into an array which can be passed to contourf as the - # specific colours for each level - colors = np.array([red, green, blue]).T - - # Subtract the global - - # Iterate over each latitude longitude slice for both e1 and a1b scenarios - # simultaneously - for e1_slice, a1b_slice in zip(e1.slices(['latitude', 'longitude']), - a1b.slices(['latitude', 'longitude'])): - - time_coord = a1b_slice.coord('time') - - # Calculate the difference from the mean - delta_e1 = e1_slice - global_avg - delta_a1b = a1b_slice - global_avg - - # Make a wider than normal figure to house two maps side-by-side - fig = plt.figure(figsize=(12, 5)) - - # Get the time datetime from the coordinate - time = time_coord.units.num2date(time_coord.points[0]) - # Set a title for the entire figure, giving the time in a nice format - # of "MonthName Year". Also, set the y value for the title so that it - # is not tight to the top of the plot. 
- fig.suptitle( - 'Annual Temperature Predictions for ' + time.strftime("%Y"), - y=0.9, - fontsize=18) - - # Add the first subplot showing the E1 scenario - plt.subplot(121) - plt.title('HadGEM2 E1 Scenario', fontsize=10) - iplt.contourf(delta_e1, levels, colors=colors, extend='both') - plt.gca().coastlines() - # get the current axes' subplot for use later on - plt1_ax = plt.gca() - - # Add the second subplot showing the A1B scenario - plt.subplot(122) - plt.title('HadGEM2 A1B-Image Scenario', fontsize=10) - contour_result = iplt.contourf(delta_a1b, levels, colors=colors, - extend='both') - plt.gca().coastlines() - # get the current axes' subplot for use later on - plt2_ax = plt.gca() - - # Now add a colourbar whose leftmost point is the same as the leftmost - # point of the left hand plot and whose rightmost point is the rightmost - # point of the right hand plot - - # Get the positions of the 2nd plot and the left position of the 1st - # plot - left, bottom, width, height = plt2_ax.get_position().bounds - first_plot_left = plt1_ax.get_position().bounds[0] - - # the width of the colorbar should now be simple - width = left - first_plot_left + width - - # Add axes to the figure, to place the colour bar - colorbar_axes = fig.add_axes([first_plot_left, bottom + 0.07, - width, 0.03]) - - # Add the colour bar - cbar = plt.colorbar(contour_result, colorbar_axes, - orientation='horizontal') - - # Label the colour bar and add ticks - cbar.set_label(e1_slice.units) - cbar.ax.tick_params(length=0) - - iplt.show() - - -if __name__ == '__main__': - main() diff --git a/docs/iris/example_code/Meteorology/TEC.py b/docs/iris/example_code/Meteorology/TEC.py deleted file mode 100644 index 43f69fcea0..0000000000 --- a/docs/iris/example_code/Meteorology/TEC.py +++ /dev/null @@ -1,41 +0,0 @@ -""" -Ionosphere space weather -======================== - -This space weather example plots a filled contour of rotated pole point -data with a shaded relief image underlay. The plot shows aggregated -vertical electron content in the ionosphere. - -The plot exhibits an interesting outline effect due to excluding data -values below a certain threshold. - -""" - -import matplotlib.pyplot as plt -import numpy.ma as ma - -import iris -import iris.plot as iplt -import iris.quickplot as qplt - - -def main(): - # Load the "total electron content" cube. - filename = iris.sample_data_path('space_weather.nc') - cube = iris.load_cube(filename, 'total electron content') - - # Explicitly mask negative electron content. - cube.data = ma.masked_less(cube.data, 0) - - # Plot the cube using one hundred colour levels.
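The masking step above in isolation: numpy.ma hides sub-threshold values, and contourf leaves masked cells blank, which is what produces the outline effect described in the docstring. A tiny check with made-up values:

import numpy.ma as ma

data = ma.masked_less([[-1.0, 2.0], [3.0, -4.0]], 0)
print(data.mask)  # [[ True False]
                  #  [False  True]]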
- qplt.contourf(cube, 100) - plt.title('Total Electron Content') - plt.xlabel('longitude / degrees') - plt.ylabel('latitude / degrees') - plt.gca().stock_img() - plt.gca().coastlines() - iplt.show() - - -if __name__ == '__main__': - main() diff --git a/docs/iris/example_code/Meteorology/__init__.py b/docs/iris/example_code/Meteorology/__init__.py deleted file mode 100644 index 39c05d08c6..0000000000 --- a/docs/iris/example_code/Meteorology/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -""" -Meteorology visualisation examples -================================== -""" diff --git a/docs/iris/example_code/Meteorology/deriving_phenomena.py b/docs/iris/example_code/Meteorology/deriving_phenomena.py deleted file mode 100644 index d34a2c8b78..0000000000 --- a/docs/iris/example_code/Meteorology/deriving_phenomena.py +++ /dev/null @@ -1,96 +0,0 @@ -""" -Deriving Exner Pressure and Air Temperature -=========================================== - -This example shows some processing of cubes in order to derive further related -cubes; in this case the derived cubes are Exner pressure and air temperature -which are calculated by combining air pressure, air potential temperature and -specific humidity. Finally, the two new cubes are presented side-by-side in a -plot. - -""" -import matplotlib.pyplot as plt -import matplotlib.ticker - -import iris -import iris.coords as coords -import iris.iterate -import iris.plot as iplt -import iris.quickplot as qplt - - -def limit_colorbar_ticks(contour_object): - """ - Takes a contour object which has an associated colorbar and limits the - number of ticks on the colorbar to 4. - - """ - # Under Matplotlib v1.2.x the colorbar attribute of a contour object is - # a tuple containing the colorbar and an axes object, whereas under - # Matplotlib v1.3.x it is simply the colorbar. - try: - colorbar = contour_object.colorbar[0] - except (AttributeError, TypeError): - colorbar = contour_object.colorbar - - colorbar.locator = matplotlib.ticker.MaxNLocator(4) - colorbar.update_ticks() - - -def main(): - fname = iris.sample_data_path('colpex.pp') - - # The list of phenomena of interest - phenomena = ['air_potential_temperature', 'air_pressure'] - - # Define the constraint on standard name and model level - constraints = [iris.Constraint(phenom, model_level_number=1) for - phenom in phenomena] - - air_potential_temperature, air_pressure = iris.load_cubes(fname, - constraints) - - # Define a coordinate which represents 1000 hPa - p0 = coords.AuxCoord(1000, long_name='P0', units='hPa') - # Convert reference pressure 'p0' into the same units as 'air_pressure' - p0.convert_units(air_pressure.units) - - # Calculate Exner pressure - exner_pressure = (air_pressure / p0) ** (287.05 / 1005.0) - # Set the name (the unit is scalar) - exner_pressure.rename('exner_pressure') - - # Calculate air_temp - air_temperature = exner_pressure * air_potential_temperature - # Set the name (the unit is K) - air_temperature.rename('air_temperature') - - # Now create an iterator which will give us lat lon slices of - # exner pressure and air temperature in the form - # (exner_slice, air_temp_slice). - lat_lon_slice_pairs = iris.iterate.izip(exner_pressure, - air_temperature, - coords=['grid_latitude', - 'grid_longitude']) - - # For the purposes of this example, we only want to demonstrate the first - # plot. 
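The MaxNLocator trick from limit_colorbar_ticks above, applied to an ordinary colorbar on random data (illustrative only):

import matplotlib.pyplot as plt
import matplotlib.ticker
import numpy as np

cs = plt.contourf(np.random.rand(10, 10))
cbar = plt.colorbar(cs)
cbar.locator = matplotlib.ticker.MaxNLocator(4)  # at most 4 ticks
cbar.update_ticks()
plt.show()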
- lat_lon_slice_pairs = [next(lat_lon_slice_pairs)] - - plt.figure(figsize=(8, 4)) - for exner_slice, air_temp_slice in lat_lon_slice_pairs: - plt.subplot(121) - cont = qplt.contourf(exner_slice) - - # The default colorbar has a few too many ticks on it, causing text to - # overlap. Therefore, limit the number of ticks. - limit_colorbar_ticks(cont) - - plt.subplot(122) - cont = qplt.contourf(air_temp_slice) - limit_colorbar_ticks(cont) - iplt.show() - - -if __name__ == '__main__': - main() diff --git a/docs/iris/example_code/Meteorology/hovmoller.py b/docs/iris/example_code/Meteorology/hovmoller.py deleted file mode 100644 index 5d8b0852ac..0000000000 --- a/docs/iris/example_code/Meteorology/hovmoller.py +++ /dev/null @@ -1,49 +0,0 @@ -""" -Hovmoller diagram of monthly surface temperature -================================================ - -This example demonstrates the creation of a Hovmoller diagram with fine control -over plot ticks and labels. The data comes from the Met Office OSTIA project -and has been pre-processed to calculate the monthly mean sea surface -temperature. - -""" -import matplotlib.pyplot as plt -import matplotlib.dates as mdates - -import iris -import iris.plot as iplt -import iris.quickplot as qplt - - -def main(): - # load a single cube of surface temperature between +/- 5 latitude - fname = iris.sample_data_path('ostia_monthly.nc') - cube = iris.load_cube(fname, - iris.Constraint('surface_temperature', - latitude=lambda v: -5 < v < 5)) - - # Take the mean over latitude - cube = cube.collapsed('latitude', iris.analysis.MEAN) - - # Now that we have our data in a nice way, let's create the plot - # contour with 20 levels - qplt.contourf(cube, 20) - - # Put a custom label on the y axis - plt.ylabel('Time / years') - - # Stop matplotlib providing clever axes range padding - plt.axis('tight') - - # As we are plotting annual variability, put years as the y ticks - plt.gca().yaxis.set_major_locator(mdates.YearLocator()) - - # And format the ticks to just show the year - plt.gca().yaxis.set_major_formatter(mdates.DateFormatter('%Y')) - - iplt.show() - - -if __name__ == '__main__': - main() diff --git a/docs/iris/example_code/Meteorology/lagged_ensemble.py b/docs/iris/example_code/Meteorology/lagged_ensemble.py deleted file mode 100644 index 7898d26392..0000000000 --- a/docs/iris/example_code/Meteorology/lagged_ensemble.py +++ /dev/null @@ -1,157 +0,0 @@ -""" -Seasonal ensemble model plots -============================= - -This example demonstrates the loading of a lagged ensemble dataset from the -GloSea4 model, which is then used to produce two types of plot: - - * The first shows the "postage stamp" style image with an array of 14 images, - one for each ensemble member with a shared colorbar. (The missing image in - this example represents ensemble member number 6 which was a failed run) - - * The second plot shows the data limited to a region of interest, in this case - a region defined for forecasting ENSO (El Nino-Southern Oscillation), which, - for the purposes of this example, has had the ensemble mean subtracted from - each ensemble member to give an anomaly surface temperature. In practice a - better approach would be to take the climatological mean, calibrated to the - model, from each ensemble member.
- -""" -import matplotlib.pyplot as plt -import numpy as np - -import iris -import iris.plot as iplt - - -def realization_metadata(cube, field, fname): - """ - A function which modifies the cube's metadata to add a "realization" - (ensemble member) coordinate from the filename if one doesn't already exist - in the cube. - - """ - # add an ensemble member coordinate if one doesn't already exist - if not cube.coords('realization'): - # the ensemble member is encoded in the filename as *_???.pp where ??? - # is the ensemble member - realization_number = fname[-6:-3] - - import iris.coords - realization_coord = iris.coords.AuxCoord(np.int32(realization_number), - 'realization') - cube.add_aux_coord(realization_coord) - - -def main(): - # extract surface temperature cubes which have an ensemble member - # coordinate, adding appropriate lagged ensemble metadata - surface_temp = iris.load_cube( - iris.sample_data_path('GloSea4', 'ensemble_???.pp'), - iris.Constraint('surface_temperature', realization=lambda value: True), - callback=realization_metadata, - ) - - # ------------------------------------------------------------------------- - # Plot #1: Ensemble postage stamps - # ------------------------------------------------------------------------- - - # for the purposes of this example, take the last time element of the cube - last_timestep = surface_temp[:, -1, :, :] - - # Make 50 evenly spaced levels which span the dataset - contour_levels = np.linspace(np.min(last_timestep.data), - np.max(last_timestep.data), - 50) - - # Create a wider than normal figure to support our many plots - plt.figure(figsize=(12, 6), dpi=100) - - # Also manually adjust the spacings which are used when creating subplots - plt.gcf().subplots_adjust(hspace=0.05, wspace=0.05, top=0.95, bottom=0.05, - left=0.075, right=0.925) - - # iterate over all possible latitude longitude slices - for cube in last_timestep.slices(['latitude', 'longitude']): - - # get the ensemble member number from the ensemble coordinate - ens_member = cube.coord('realization').points[0] - - # plot the data in a 4x4 grid, with each plot's position in the grid - # being determined by ensemble member number the special case for the - # 13th ensemble member is to have the plot at the bottom right - if ens_member == 13: - plt.subplot(4, 4, 16) - else: - plt.subplot(4, 4, ens_member+1) - - cf = iplt.contourf(cube, contour_levels) - - # add coastlines - plt.gca().coastlines() - - # make an axes to put the shared colorbar in - colorbar_axes = plt.gcf().add_axes([0.35, 0.1, 0.3, 0.05]) - colorbar = plt.colorbar(cf, colorbar_axes, orientation='horizontal') - colorbar.set_label('%s' % last_timestep.units) - - # limit the colorbar to 8 tick marks - import matplotlib.ticker - colorbar.locator = matplotlib.ticker.MaxNLocator(8) - colorbar.update_ticks() - - # get the time for the entire plot - time_coord = last_timestep.coord('time') - time = time_coord.units.num2date(time_coord.bounds[0, 0]) - - # set a global title for the postage stamps with the date formated by - # "monthname year" - plt.suptitle('Surface temperature ensemble forecasts for %s' % ( - time.strftime('%B %Y'), )) - - iplt.show() - - # ------------------------------------------------------------------------- - # Plot #2: ENSO plumes - # ------------------------------------------------------------------------- - - # Nino 3.4 lies between: 170W and 120W, 5N and 5S, so define a constraint - # which matches this - nino_3_4_constraint = iris.Constraint( - longitude=lambda v: -170+360 <= v <= -120+360, 
- latitude=lambda v: -5 <= v <= 5) - - nino_cube = surface_temp.extract(nino_3_4_constraint) - - # Subsetting a circular longitude coordinate always results in a circular - # coordinate, so set the coordinate to be non-circular - nino_cube.coord('longitude').circular = False - - # Calculate the horizontal mean for the nino region - mean = nino_cube.collapsed(['latitude', 'longitude'], iris.analysis.MEAN) - - # Calculate the ensemble mean of the horizontal mean. To do this, remove - # the "forecast_period" and "forecast_reference_time" coordinates which - # span both "realization" and "time". - mean.remove_coord("forecast_reference_time") - mean.remove_coord("forecast_period") - ensemble_mean = mean.collapsed('realization', iris.analysis.MEAN) - - # subtract the ensemble mean from each ensemble member - mean -= ensemble_mean.data - - plt.figure() - - for ensemble_member in mean.slices(['time']): - # draw each ensemble member as a dashed line in black - iplt.plot(ensemble_member, '--k') - - plt.title('Mean temperature anomaly for ENSO 3.4 region') - plt.xlabel('Time') - plt.ylabel('Temperature anomaly / K') - - iplt.show() - - -if __name__ == '__main__': - main() diff --git a/docs/iris/example_code/Meteorology/wind_speed.py b/docs/iris/example_code/Meteorology/wind_speed.py deleted file mode 100644 index 231defb25b..0000000000 --- a/docs/iris/example_code/Meteorology/wind_speed.py +++ /dev/null @@ -1,85 +0,0 @@ -""" -Plotting wind direction using quiver -==================================== - -This example demonstrates using quiver to plot wind speed contours and wind -direction arrows from wind vector component input data. The vector components -are co-located in space in this case. - -For the second plot, the data used for the arrows is normalised to produce -arrows with a uniform size on the plot.
- -""" - -import matplotlib.pyplot as plt -import numpy as np - -import iris -import iris.coord_categorisation -import iris.quickplot as qplt - -import cartopy -import cartopy.feature as cfeat -import cartopy.crs as ccrs - - -def main(): - # Load the u and v components of wind from a pp file - infile = iris.sample_data_path('wind_speed_lake_victoria.pp') - - uwind = iris.load_cube(infile, 'x_wind') - vwind = iris.load_cube(infile, 'y_wind') - - ulon = uwind.coord('longitude') - vlon = vwind.coord('longitude') - - # The longitude points go from 180 to 540, so subtract 360 from them - ulon.points = ulon.points - 360.0 - vlon.points = vlon.points - 360.0 - - # Create a cube containing the wind speed - windspeed = (uwind ** 2 + vwind ** 2) ** 0.5 - windspeed.rename('windspeed') - - x = ulon.points - y = uwind.coord('latitude').points - u = uwind.data - v = vwind.data - - # Set up axes to show the lake - lakes = cfeat.NaturalEarthFeature('physical', 'lakes', '50m', - facecolor='none') - - plt.figure() - ax = plt.axes(projection=ccrs.PlateCarree()) - ax.add_feature(lakes) - - # Get the coordinate reference system used by the data - transform = ulon.coord_system.as_cartopy_projection() - - # Plot the wind speed as a contour plot - qplt.contourf(windspeed, 20) - - # Add arrows to show the wind vectors - plt.quiver(x, y, u, v, pivot='middle', transform=transform) - - plt.title("Wind speed over Lake Victoria") - qplt.show() - - # Normalise the data for uniform arrow size - u_norm = u / np.sqrt(u ** 2.0 + v ** 2.0) - v_norm = v / np.sqrt(u ** 2.0 + v ** 2.0) - - plt.figure() - ax = plt.axes(projection=ccrs.PlateCarree()) - ax.add_feature(lakes) - - qplt.contourf(windspeed, 20) - - plt.quiver(x, y, u_norm, v_norm, pivot='middle', transform=transform) - - plt.title("Wind speed over Lake Victoria") - qplt.show() - -if __name__ == '__main__': - main() diff --git a/docs/iris/example_code/Oceanography/__init__.py b/docs/iris/example_code/Oceanography/__init__.py deleted file mode 100644 index afac828a05..0000000000 --- a/docs/iris/example_code/Oceanography/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -""" -Oceanography visualisation examples -=================================== -""" diff --git a/docs/iris/example_code/Oceanography/atlantic_profiles.py b/docs/iris/example_code/Oceanography/atlantic_profiles.py deleted file mode 100644 index b3d76ea632..0000000000 --- a/docs/iris/example_code/Oceanography/atlantic_profiles.py +++ /dev/null @@ -1,82 +0,0 @@ -""" -Oceanographic profiles and T-S diagrams -======================================= - -This example demonstrates how to plot vertical profiles of different -variables in the same axes, and how to make a scatter plot of two -variables. There is an oceanographic theme but the same techniques are -equally applicable to atmospheric or other kinds of data. - -The data used are profiles of potential temperature and salinity in the -Equatorial and South Atlantic, output from an ocean model. - -The y-axis of the first plot produced will be automatically inverted due to the -presence of the attribute positive=down on the depth coordinate. This means -depth values intuitively increase downward on the y-axis. - -""" -import iris -import iris.iterate -import iris.plot as iplt -import matplotlib.pyplot as plt - - -def main(): - # Load the gridded temperature and salinity data. 
- fname = iris.sample_data_path('atlantic_profiles.nc') - cubes = iris.load(fname) - theta, = cubes.extract('sea_water_potential_temperature') - salinity, = cubes.extract('sea_water_practical_salinity') - - # Extract profiles of temperature and salinity from a particular point in - # the southern portion of the domain, and limit the depth of the profile - # to 1000m. - lon_cons = iris.Constraint(longitude=330.5) - lat_cons = iris.Constraint(latitude=lambda l: -10 < l < -9) - depth_cons = iris.Constraint(depth=lambda d: d <= 1000) - theta_1000m = theta.extract(depth_cons & lon_cons & lat_cons) - salinity_1000m = salinity.extract(depth_cons & lon_cons & lat_cons) - - # Plot these profiles on the same set of axes. In each case we call plot - # with two arguments, the cube followed by the depth coordinate. Putting - # them in this order places the depth coordinate on the y-axis. - # The first plot is in the default axes. We'll use the same color for the - # curve and its axes/tick labels. - plt.figure(figsize=(5, 6)) - temperature_color = (.3, .4, .5) - ax1 = plt.gca() - iplt.plot(theta_1000m, theta_1000m.coord('depth'), linewidth=2, - color=temperature_color, alpha=.75) - ax1.set_xlabel('Potential Temperature / K', color=temperature_color) - ax1.set_ylabel('Depth / m') - for ticklabel in ax1.get_xticklabels(): - ticklabel.set_color(temperature_color) - # To plot salinity in the same axes we use twiny(). We'll use a different - # color to identify salinity. - salinity_color = (.6, .1, .15) - ax2 = plt.gca().twiny() - iplt.plot(salinity_1000m, salinity_1000m.coord('depth'), linewidth=2, - color=salinity_color, alpha=.75) - ax2.set_xlabel('Salinity / PSU', color=salinity_color) - for ticklabel in ax2.get_xticklabels(): - ticklabel.set_color(salinity_color) - plt.tight_layout() - iplt.show() - - # Now plot a T-S diagram using scatter. We'll use all the profiles here, - # and each point will be coloured according to its depth. - plt.figure(figsize=(6, 6)) - depth_values = theta.coord('depth').points - for s, t in iris.iterate.izip(salinity, theta, coords='depth'): - iplt.scatter(s, t, c=depth_values, marker='+', cmap='RdYlBu_r') - ax = plt.gca() - ax.set_xlabel('Salinity / PSU') - ax.set_ylabel('Potential Temperature / K') - cb = plt.colorbar(orientation='horizontal') - cb.set_label('Depth / m') - plt.tight_layout() - iplt.show() - - -if __name__ == '__main__': - main() diff --git a/docs/iris/example_code/Oceanography/load_nemo.py b/docs/iris/example_code/Oceanography/load_nemo.py deleted file mode 100644 index a76da68248..0000000000 --- a/docs/iris/example_code/Oceanography/load_nemo.py +++ /dev/null @@ -1,59 +0,0 @@ -""" -Load a time series of data from the NEMO model -============================================== - -This example demonstrates how to load multiple files containing data output by -the NEMO model and combine them into a time series in a single cube. The -different time dimensions in these files can prevent Iris from concatenating -them without the intervention shown here. -""" -from __future__ import unicode_literals - -import iris -import iris.plot as iplt -import iris.quickplot as qplt -import matplotlib.pyplot as plt -from iris.util import promote_aux_coord_to_dim_coord - - -def main(): - # Load the three files of sample NEMO data. - fname = iris.sample_data_path('NEMO/nemo_1m_*.nc') - cubes = iris.load(fname) - - # Some attributes are unique to each file and must be blanked - # to allow concatenation. 
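An aside on the twiny() technique used in the profile plot above: two curves share a y-axis but get independent x-axes. A self-contained sketch with synthetic profiles (the linear temperature and salinity shapes are made up):

import matplotlib.pyplot as plt
import numpy as np

depth = np.linspace(0, 1000, 50)
ax1 = plt.gca()
ax1.plot(280.0 + 0.01 * depth, depth, color='tab:blue')
ax1.set_xlabel('Potential Temperature / K', color='tab:blue')
ax2 = ax1.twiny()   # same y-axis, new x-axis along the top
ax2.plot(35.0 - 0.001 * depth, depth, color='tab:red')
ax2.set_xlabel('Salinity / PSU', color='tab:red')
ax1.invert_yaxis()  # depth increases downwards
plt.show()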
- differing_attrs = ['file_name', 'name', 'timeStamp', 'TimeStamp'] - for cube in cubes: - for attribute in differing_attrs: - cube.attributes[attribute] = '' - - # The cubes still cannot be concatenated because their time dimension is - # time_counter rather than time. time needs to be promoted to allow - # concatenation. - for cube in cubes: - promote_aux_coord_to_dim_coord(cube, 'time') - - # The cubes can now be concatenated into a single time series. - cube = cubes.concatenate_cube() - - # Generate a time series plot of a single point - plt.figure() - y_point_index = 100 - x_point_index = 100 - qplt.plot(cube[:, y_point_index, x_point_index], 'o-') - - # Include the point's position in the plot's title - lat_point = cube.coord('latitude').points[y_point_index, x_point_index] - lat_string = '{:.3f}\u00B0 {}'.format(abs(lat_point), - 'N' if lat_point > 0. else 'S') - lon_point = cube.coord('longitude').points[y_point_index, x_point_index] - lon_string = '{:.3f}\u00B0 {}'.format(abs(lon_point), - 'E' if lon_point > 0. else 'W') - plt.title('{} at {} {}'.format(cube.long_name.capitalize(), - lat_string, lon_string)) - iplt.show() - - -if __name__ == '__main__': - main() diff --git a/docs/iris/example_tests/__init__.py b/docs/iris/example_tests/__init__.py deleted file mode 100644 index 174361337f..0000000000 --- a/docs/iris/example_tests/__init__.py +++ /dev/null @@ -1,19 +0,0 @@ -# (C) British Crown Copyright 2010 - 2015, Met Office -# -# This file is part of Iris. -# -# Iris is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the -# Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Iris is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with Iris. If not, see . - -from __future__ import (absolute_import, division, print_function) -from six.moves import (filter, input, map, range, zip) # noqa diff --git a/docs/iris/example_tests/extest_util.py b/docs/iris/example_tests/extest_util.py deleted file mode 100644 index 3d9017b6ee..0000000000 --- a/docs/iris/example_tests/extest_util.py +++ /dev/null @@ -1,97 +0,0 @@ -# (C) British Crown Copyright 2010 - 2016, Met Office -# -# This file is part of Iris. -# -# Iris is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the -# Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Iris is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with Iris. If not, see . - -""" -Provides context managers which are fundamental to the ability -to run the example tests. 
- -""" - -from __future__ import (absolute_import, division, print_function) -from six.moves import (filter, input, map, range, zip) # noqa - -import contextlib -import os.path -import warnings -import sys - -import matplotlib.pyplot as plt - -import iris -from iris._deprecation import IrisDeprecation -import iris.plot as iplt -import iris.quickplot as qplt - - -EXAMPLE_DIRECTORY = os.path.join(os.path.dirname(os.path.dirname(__file__)), - 'example_code') -EXAMPLE_DIRECTORIES = [os.path.join(EXAMPLE_DIRECTORY, the_dir) - for the_dir in os.listdir(EXAMPLE_DIRECTORY)] - - -@contextlib.contextmanager -def add_examples_to_path(): - """ - Creates a context manager which can be used to add the iris examples - to the PYTHONPATH. The examples are only importable throughout the lifetime - of this context manager. - - """ - orig_sys_path = sys.path - sys.path = sys.path[:] - sys.path += EXAMPLE_DIRECTORIES - yield - sys.path = orig_sys_path - - -@contextlib.contextmanager -def show_replaced_by_check_graphic(test_case): - """ - Creates a context manager which can be used to replace the functionality - of matplotlib.pyplot.show with a function which calls the check_graphic - method on the given test_case (iris.tests.IrisTest.check_graphic). - - """ - def replacement_show(): - # form a closure on test_case and tolerance - test_case.check_graphic() - - orig_show = plt.show - plt.show = iplt.show = qplt.show = replacement_show - yield - plt.show = iplt.show = qplt.show = orig_show - - -@contextlib.contextmanager -def fail_any_deprecation_warnings(): - """ - Create a context in which any deprecation warning will cause an error. - - The context also resets all the iris.FUTURE settings to the defaults, as - otherwise changes made in one test can affect subsequent ones. - - """ - with warnings.catch_warnings(): - # Detect and error all and any Iris deprecation warnings. - warnings.simplefilter("error", IrisDeprecation) - # Run with all default settings in iris.FUTURE. - default_future_kwargs = iris.Future().__dict__.copy() - for dead_option in iris.Future.deprecated_options: - # Avoid a warning when setting these ! - del default_future_kwargs[dead_option] - with iris.FUTURE.context(**default_future_kwargs): - yield diff --git a/docs/iris/example_tests/test_COP_1d_plot.py b/docs/iris/example_tests/test_COP_1d_plot.py deleted file mode 100644 index f356423b25..0000000000 --- a/docs/iris/example_tests/test_COP_1d_plot.py +++ /dev/null @@ -1,41 +0,0 @@ -# (C) British Crown Copyright 2010 - 2016, Met Office -# -# This file is part of Iris. -# -# Iris is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the -# Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Iris is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with Iris. If not, see . - -from __future__ import (absolute_import, division, print_function) -from six.moves import (filter, input, map, range, zip) # noqa - -# Import Iris tests first so that some things can be initialised before -# importing anything else. 
-import iris.tests as tests - -from .extest_util import (add_examples_to_path, - show_replaced_by_check_graphic, - fail_any_deprecation_warnings) - - -class TestCOP1DPlot(tests.GraphicsTest): - """Test the COP_1d_plot example code.""" - def test_COP_1d_plot(self): - with fail_any_deprecation_warnings(): - with add_examples_to_path(): - import COP_1d_plot - with show_replaced_by_check_graphic(self): - COP_1d_plot.main() - - -if __name__ == '__main__': - tests.main() diff --git a/docs/iris/example_tests/test_COP_maps.py b/docs/iris/example_tests/test_COP_maps.py deleted file mode 100644 index 10dcbcc816..0000000000 --- a/docs/iris/example_tests/test_COP_maps.py +++ /dev/null @@ -1,41 +0,0 @@ -# (C) British Crown Copyright 2010 - 2016, Met Office -# -# This file is part of Iris. -# -# Iris is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the -# Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Iris is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with Iris. If not, see . - -from __future__ import (absolute_import, division, print_function) -from six.moves import (filter, input, map, range, zip) # noqa - -# Import Iris tests first so that some things can be initialised before -# importing anything else. -import iris.tests as tests - -from .extest_util import (add_examples_to_path, - show_replaced_by_check_graphic, - fail_any_deprecation_warnings) - - -class TestCOPMaps(tests.GraphicsTest): - """Test the COP_maps example code.""" - def test_cop_maps(self): - with fail_any_deprecation_warnings(): - with add_examples_to_path(): - import COP_maps - with show_replaced_by_check_graphic(self): - COP_maps.main() - - -if __name__ == '__main__': - tests.main() diff --git a/docs/iris/example_tests/test_SOI_filtering.py b/docs/iris/example_tests/test_SOI_filtering.py deleted file mode 100644 index ea2c48920e..0000000000 --- a/docs/iris/example_tests/test_SOI_filtering.py +++ /dev/null @@ -1,41 +0,0 @@ -# (C) British Crown Copyright 2012 - 2016, Met Office -# -# This file is part of Iris. -# -# Iris is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the -# Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Iris is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with Iris. If not, see . - -from __future__ import (absolute_import, division, print_function) -from six.moves import (filter, input, map, range, zip) # noqa - -# Import Iris tests first so that some things can be initialised before -# importing anything else. 
-import iris.tests as tests - -from .extest_util import (add_examples_to_path, - show_replaced_by_check_graphic, - fail_any_deprecation_warnings) - - -class TestSOIFiltering(tests.GraphicsTest): - """Test the SOI_filtering example code.""" - def test_soi_filtering(self): - with fail_any_deprecation_warnings(): - with add_examples_to_path(): - import SOI_filtering - with show_replaced_by_check_graphic(self): - SOI_filtering.main() - - -if __name__ == '__main__': - tests.main() diff --git a/docs/iris/example_tests/test_TEC.py b/docs/iris/example_tests/test_TEC.py deleted file mode 100644 index e6e27c46bd..0000000000 --- a/docs/iris/example_tests/test_TEC.py +++ /dev/null @@ -1,41 +0,0 @@ -# (C) British Crown Copyright 2010 - 2016, Met Office -# -# This file is part of Iris. -# -# Iris is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the -# Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Iris is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with Iris. If not, see . - -from __future__ import (absolute_import, division, print_function) -from six.moves import (filter, input, map, range, zip) # noqa - -# Import Iris tests first so that some things can be initialised before -# importing anything else. -import iris.tests as tests - -from .extest_util import (add_examples_to_path, - show_replaced_by_check_graphic, - fail_any_deprecation_warnings) - - -class TestTEC(tests.GraphicsTest): - """Test the TEC example code.""" - def test_TEC(self): - with fail_any_deprecation_warnings(): - with add_examples_to_path(): - import TEC - with show_replaced_by_check_graphic(self): - TEC.main() - - -if __name__ == '__main__': - tests.main() diff --git a/docs/iris/example_tests/test_anomaly_log_colouring.py b/docs/iris/example_tests/test_anomaly_log_colouring.py deleted file mode 100644 index faa040c0e6..0000000000 --- a/docs/iris/example_tests/test_anomaly_log_colouring.py +++ /dev/null @@ -1,41 +0,0 @@ -# (C) British Crown Copyright 2014 - 2016, Met Office -# -# This file is part of Iris. -# -# Iris is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the -# Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Iris is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with Iris. If not, see . - -from __future__ import (absolute_import, division, print_function) -from six.moves import (filter, input, map, range, zip) # noqa - -# Import Iris tests first so that some things can be initialised before -# importing anything else. 
-import iris.tests as tests - -from .extest_util import (add_examples_to_path, - show_replaced_by_check_graphic, - fail_any_deprecation_warnings) - - -class TestAnomalyLogColouring(tests.GraphicsTest): - """Test the anomaly colouring example code.""" - def test_anomaly_log_colouring(self): - with fail_any_deprecation_warnings(): - with add_examples_to_path(): - import anomaly_log_colouring - with show_replaced_by_check_graphic(self): - anomaly_log_colouring.main() - - -if __name__ == '__main__': - tests.main() diff --git a/docs/iris/example_tests/test_atlantic_profiles.py b/docs/iris/example_tests/test_atlantic_profiles.py deleted file mode 100644 index 63d6b7b754..0000000000 --- a/docs/iris/example_tests/test_atlantic_profiles.py +++ /dev/null @@ -1,41 +0,0 @@ -# (C) British Crown Copyright 2013 - 2016, Met Office -# -# This file is part of Iris. -# -# Iris is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the -# Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Iris is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with Iris. If not, see . - -from __future__ import (absolute_import, division, print_function) -from six.moves import (filter, input, map, range, zip) # noqa - -# Import Iris tests first so that some things can be initialised before -# importing anything else. -import iris.tests as tests - -from .extest_util import (add_examples_to_path, - show_replaced_by_check_graphic, - fail_any_deprecation_warnings) - - -class TestAtlanticProfiles(tests.GraphicsTest): - """Test the atlantic_profiles example code.""" - def test_atlantic_profiles(self): - with fail_any_deprecation_warnings(): - with add_examples_to_path(): - import atlantic_profiles - with show_replaced_by_check_graphic(self): - atlantic_profiles.main() - - -if __name__ == '__main__': - tests.main() diff --git a/docs/iris/example_tests/test_coriolis_plot.py b/docs/iris/example_tests/test_coriolis_plot.py deleted file mode 100644 index 71a2334488..0000000000 --- a/docs/iris/example_tests/test_coriolis_plot.py +++ /dev/null @@ -1,40 +0,0 @@ -# (C) British Crown Copyright 2016, Met Office -# -# This file is part of Iris. -# -# Iris is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the -# Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Iris is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with Iris. If not, see . - -from __future__ import (absolute_import, division, print_function) -from six.moves import (filter, input, map, range, zip) # noqa - -# Import Iris tests first so that some things can be initialised before -# importing anything else. - -import iris.tests as tests - -from . 
import extest_util - -with extest_util.add_examples_to_path(): - import coriolis_plot - - -class TestCoriolisPlot(tests.GraphicsTest): - """Test the Coriolis Plot example code.""" - def test_coriolis_plot(self): - with extest_util.show_replaced_by_check_graphic(self): - coriolis_plot.main() - - -if __name__ == '__main__': - tests.main() diff --git a/docs/iris/example_tests/test_cross_section.py b/docs/iris/example_tests/test_cross_section.py deleted file mode 100644 index ad62f51b01..0000000000 --- a/docs/iris/example_tests/test_cross_section.py +++ /dev/null @@ -1,41 +0,0 @@ -# (C) British Crown Copyright 2010 - 2016, Met Office -# -# This file is part of Iris. -# -# Iris is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the -# Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Iris is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with Iris. If not, see . - -from __future__ import (absolute_import, division, print_function) -from six.moves import (filter, input, map, range, zip) # noqa - -# Import Iris tests first so that some things can be initialised before -# importing anything else. -import iris.tests as tests - -from .extest_util import (add_examples_to_path, - show_replaced_by_check_graphic, - fail_any_deprecation_warnings) - - -class TestCrossSection(tests.GraphicsTest): - """Test the cross_section example code.""" - def test_cross_section(self): - with fail_any_deprecation_warnings(): - with add_examples_to_path(): - import cross_section - with show_replaced_by_check_graphic(self): - cross_section.main() - - -if __name__ == '__main__': - tests.main() diff --git a/docs/iris/example_tests/test_custom_aggregation.py b/docs/iris/example_tests/test_custom_aggregation.py deleted file mode 100644 index 319078a3a4..0000000000 --- a/docs/iris/example_tests/test_custom_aggregation.py +++ /dev/null @@ -1,41 +0,0 @@ -# (C) British Crown Copyright 2013 - 2016, Met Office -# -# This file is part of Iris. -# -# Iris is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the -# Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Iris is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with Iris. If not, see . - -from __future__ import (absolute_import, division, print_function) -from six.moves import (filter, input, map, range, zip) # noqa - -# Import Iris tests first so that some things can be initialised before -# importing anything else. 
-import iris.tests as tests - -from .extest_util import (add_examples_to_path, - show_replaced_by_check_graphic, - fail_any_deprecation_warnings) - - -class TestCustomAggregation(tests.GraphicsTest): - """Test the custom aggregation example code.""" - def test_custom_aggregation(self): - with fail_any_deprecation_warnings(): - with add_examples_to_path(): - import custom_aggregation - with show_replaced_by_check_graphic(self): - custom_aggregation.main() - - -if __name__ == '__main__': - tests.main() diff --git a/docs/iris/example_tests/test_custom_file_loading.py b/docs/iris/example_tests/test_custom_file_loading.py deleted file mode 100644 index b0231d474e..0000000000 --- a/docs/iris/example_tests/test_custom_file_loading.py +++ /dev/null @@ -1,41 +0,0 @@ -# (C) British Crown Copyright 2010 - 2016, Met Office -# -# This file is part of Iris. -# -# Iris is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the -# Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Iris is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with Iris. If not, see <http://www.gnu.org/licenses/>. - -from __future__ import (absolute_import, division, print_function) -from six.moves import (filter, input, map, range, zip) # noqa - -# Import Iris tests first so that some things can be initialised before -# importing anything else. -import iris.tests as tests - -from .extest_util import (add_examples_to_path, - show_replaced_by_check_graphic, - fail_any_deprecation_warnings) - - -class TestCustomFileLoading(tests.GraphicsTest): - """Test the custom_file_loading example code.""" - def test_custom_file_loading(self): - with fail_any_deprecation_warnings(): - with add_examples_to_path(): - import custom_file_loading - with show_replaced_by_check_graphic(self): - custom_file_loading.main() - - -if __name__ == '__main__': - tests.main() diff --git a/docs/iris/example_tests/test_deriving_phenomena.py b/docs/iris/example_tests/test_deriving_phenomena.py deleted file mode 100644 index bad47fe438..0000000000 --- a/docs/iris/example_tests/test_deriving_phenomena.py +++ /dev/null @@ -1,41 +0,0 @@ -# (C) British Crown Copyright 2010 - 2016, Met Office -# -# This file is part of Iris. -# -# Iris is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the -# Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Iris is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with Iris. If not, see <http://www.gnu.org/licenses/>. - -from __future__ import (absolute_import, division, print_function) -from six.moves import (filter, input, map, range, zip) # noqa - -# Import Iris tests first so that some things can be initialised before -# importing anything else. 
-import iris.tests as tests - -from .extest_util import (add_examples_to_path, - show_replaced_by_check_graphic, - fail_any_deprecation_warnings) - - -class TestDerivingPhenomena(tests.GraphicsTest): - """Test the deriving_phenomena example code.""" - def test_deriving_phenomena(self): - with fail_any_deprecation_warnings(): - with add_examples_to_path(): - import deriving_phenomena - with show_replaced_by_check_graphic(self): - deriving_phenomena.main() - - -if __name__ == '__main__': - tests.main() diff --git a/docs/iris/example_tests/test_global_map.py b/docs/iris/example_tests/test_global_map.py deleted file mode 100644 index eee10d6187..0000000000 --- a/docs/iris/example_tests/test_global_map.py +++ /dev/null @@ -1,41 +0,0 @@ -# (C) British Crown Copyright 2010 - 2016, Met Office -# -# This file is part of Iris. -# -# Iris is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the -# Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Iris is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with Iris. If not, see <http://www.gnu.org/licenses/>. - -from __future__ import (absolute_import, division, print_function) -from six.moves import (filter, input, map, range, zip) # noqa - -# Import Iris tests first so that some things can be initialised before -# importing anything else. -import iris.tests as tests - -from .extest_util import (add_examples_to_path, - show_replaced_by_check_graphic, - fail_any_deprecation_warnings) - - -class TestGlobalMap(tests.GraphicsTest): - """Test the global_map example code.""" - def test_global_map(self): - with fail_any_deprecation_warnings(): - with add_examples_to_path(): - import global_map - with show_replaced_by_check_graphic(self): - global_map.main() - - -if __name__ == '__main__': - tests.main() diff --git a/docs/iris/example_tests/test_hovmoller.py b/docs/iris/example_tests/test_hovmoller.py deleted file mode 100644 index ff0f37b1eb..0000000000 --- a/docs/iris/example_tests/test_hovmoller.py +++ /dev/null @@ -1,41 +0,0 @@ -# (C) British Crown Copyright 2010 - 2016, Met Office -# -# This file is part of Iris. -# -# Iris is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the -# Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Iris is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with Iris. If not, see <http://www.gnu.org/licenses/>. - -from __future__ import (absolute_import, division, print_function) -from six.moves import (filter, input, map, range, zip) # noqa - -# Import Iris tests first so that some things can be initialised before -# importing anything else. 
-import iris.tests as tests - -from .extest_util import (add_examples_to_path, - show_replaced_by_check_graphic, - fail_any_deprecation_warnings) - - -class TestHovmoller(tests.GraphicsTest): - """Test the hovmoller example code.""" - def test_hovmoller(self): - with fail_any_deprecation_warnings(): - with add_examples_to_path(): - import hovmoller - with show_replaced_by_check_graphic(self): - hovmoller.main() - - -if __name__ == '__main__': - tests.main() diff --git a/docs/iris/example_tests/test_inset_plot.py b/docs/iris/example_tests/test_inset_plot.py deleted file mode 100644 index 67c4eb9e66..0000000000 --- a/docs/iris/example_tests/test_inset_plot.py +++ /dev/null @@ -1,42 +0,0 @@ -# (C) British Crown Copyright 2016, Met Office -# -# This file is part of Iris. -# -# Iris is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the -# Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Iris is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with Iris. If not, see <http://www.gnu.org/licenses/>. - -from __future__ import (absolute_import, division, print_function) -from six.moves import (filter, input, map, range, zip) # noqa - -# Import Iris tests first so that some things can be initialised before -# importing anything else. - -import iris.tests as tests - -from .extest_util import (add_examples_to_path, - show_replaced_by_check_graphic, - fail_any_deprecation_warnings) - - -class TestInsetPlot(tests.GraphicsTest): - """Test the inset plot example code.""" - def test_inset_plot(self): - with fail_any_deprecation_warnings(): - with add_examples_to_path(): - import inset_plot - with show_replaced_by_check_graphic(self): - inset_plot.main() - - -if __name__ == '__main__': - tests.main() diff --git a/docs/iris/example_tests/test_lagged_ensemble.py b/docs/iris/example_tests/test_lagged_ensemble.py deleted file mode 100644 index faa7960dbb..0000000000 --- a/docs/iris/example_tests/test_lagged_ensemble.py +++ /dev/null @@ -1,41 +0,0 @@ -# (C) British Crown Copyright 2010 - 2016, Met Office -# -# This file is part of Iris. -# -# Iris is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the -# Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Iris is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with Iris. If not, see <http://www.gnu.org/licenses/>. - -from __future__ import (absolute_import, division, print_function) -from six.moves import (filter, input, map, range, zip) # noqa - -# Import Iris tests first so that some things can be initialised before -# importing anything else. 
-import iris.tests as tests - -from .extest_util import (add_examples_to_path, - show_replaced_by_check_graphic, - fail_any_deprecation_warnings) - - -class TestLaggedEnsemble(tests.GraphicsTest): - """Test the lagged ensemble example code.""" - def test_lagged_ensemble(self): - with fail_any_deprecation_warnings(): - with add_examples_to_path(): - import lagged_ensemble - with show_replaced_by_check_graphic(self): - lagged_ensemble.main() - - -if __name__ == '__main__': - tests.main() diff --git a/docs/iris/example_tests/test_lineplot_with_legend.py b/docs/iris/example_tests/test_lineplot_with_legend.py deleted file mode 100644 index a7f9e54b6f..0000000000 --- a/docs/iris/example_tests/test_lineplot_with_legend.py +++ /dev/null @@ -1,41 +0,0 @@ -# (C) British Crown Copyright 2010 - 2016, Met Office -# -# This file is part of Iris. -# -# Iris is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the -# Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Iris is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with Iris. If not, see <http://www.gnu.org/licenses/>. - -from __future__ import (absolute_import, division, print_function) -from six.moves import (filter, input, map, range, zip) # noqa - -# Import Iris tests first so that some things can be initialised before -# importing anything else. -import iris.tests as tests - -from .extest_util import (add_examples_to_path, - show_replaced_by_check_graphic, - fail_any_deprecation_warnings) - - -class TestLineplotWithLegend(tests.GraphicsTest): - """Test the lineplot_with_legend example code.""" - def test_lineplot_with_legend(self): - with fail_any_deprecation_warnings(): - with add_examples_to_path(): - import lineplot_with_legend - with show_replaced_by_check_graphic(self): - lineplot_with_legend.main() - - -if __name__ == '__main__': - tests.main() diff --git a/docs/iris/example_tests/test_load_nemo.py b/docs/iris/example_tests/test_load_nemo.py deleted file mode 100644 index 13785609e2..0000000000 --- a/docs/iris/example_tests/test_load_nemo.py +++ /dev/null @@ -1,41 +0,0 @@ -# (C) British Crown Copyright 2019, Met Office -# -# This file is part of Iris. -# -# Iris is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the -# Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Iris is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with Iris. If not, see <http://www.gnu.org/licenses/>. - -from __future__ import (absolute_import, division, print_function) -from six.moves import (filter, input, map, range, zip) # noqa - -# Import Iris tests first so that some things can be initialised before -# importing anything else. 
-import iris.tests as tests - -from .extest_util import (add_examples_to_path, - show_replaced_by_check_graphic, - fail_any_deprecation_warnings) - - -class TestLoadNemo(tests.GraphicsTest): - """Test the load_nemo example code.""" - def test_load_nemo(self): - with fail_any_deprecation_warnings(): - with add_examples_to_path(): - import load_nemo - with show_replaced_by_check_graphic(self): - load_nemo.main() - - -if __name__ == '__main__': - tests.main() diff --git a/docs/iris/example_tests/test_orca_projection.py b/docs/iris/example_tests/test_orca_projection.py deleted file mode 100644 index 5d7b5e4114..0000000000 --- a/docs/iris/example_tests/test_orca_projection.py +++ /dev/null @@ -1,41 +0,0 @@ -# (C) British Crown Copyright 2014 - 2016, Met Office -# -# This file is part of Iris. -# -# Iris is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the -# Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Iris is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with Iris. If not, see <http://www.gnu.org/licenses/>. - -from __future__ import (absolute_import, division, print_function) -from six.moves import (filter, input, map, range, zip) # noqa - -# Import Iris tests first so that some things can be initialised before -# importing anything else. -import iris.tests as tests - -from .extest_util import (add_examples_to_path, - show_replaced_by_check_graphic, - fail_any_deprecation_warnings) - - -class TestOrcaProjection(tests.GraphicsTest): - """Test the orca projection example code.""" - def test_orca_projection(self): - with fail_any_deprecation_warnings(): - with add_examples_to_path(): - import orca_projection - with show_replaced_by_check_graphic(self): - orca_projection.main() - - -if __name__ == '__main__': - tests.main() diff --git a/docs/iris/example_tests/test_polar_stereo.py b/docs/iris/example_tests/test_polar_stereo.py deleted file mode 100644 index c2bbc0e02b..0000000000 --- a/docs/iris/example_tests/test_polar_stereo.py +++ /dev/null @@ -1,42 +0,0 @@ -# (C) British Crown Copyright 2013 - 2016, Met Office -# -# This file is part of Iris. -# -# Iris is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the -# Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Iris is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with Iris. If not, see <http://www.gnu.org/licenses/>. - -from __future__ import (absolute_import, division, print_function) -from six.moves import (filter, input, map, range, zip) # noqa - -# Import Iris tests first so that some things can be initialised before -# importing anything else. 
-import iris.tests as tests - -from .extest_util import (add_examples_to_path, - show_replaced_by_check_graphic, - fail_any_deprecation_warnings) - - -@tests.skip_grib -class TestPolarStereo(tests.GraphicsTest): - """Test the polar_stereo example code.""" - def test_polar_stereo(self): - with fail_any_deprecation_warnings(): - with add_examples_to_path(): - import polar_stereo - with show_replaced_by_check_graphic(self): - polar_stereo.main() - - -if __name__ == '__main__': - tests.main() diff --git a/docs/iris/example_tests/test_polynomial_fit.py b/docs/iris/example_tests/test_polynomial_fit.py deleted file mode 100644 index c56834a961..0000000000 --- a/docs/iris/example_tests/test_polynomial_fit.py +++ /dev/null @@ -1,41 +0,0 @@ -# (C) British Crown Copyright 2016, Met Office -# -# This file is part of Iris. -# -# Iris is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the -# Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Iris is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with Iris. If not, see <http://www.gnu.org/licenses/>. - -from __future__ import (absolute_import, division, print_function) -from six.moves import (filter, input, map, range, zip) # noqa - -# Import Iris tests first so that some things can be initialised before -# importing anything else. -import iris.tests as tests - -from .extest_util import (add_examples_to_path, - show_replaced_by_check_graphic, - fail_any_deprecation_warnings) - - -class TestPolynomialFit(tests.GraphicsTest): - """Test the polynomial_fit example code.""" - def test_polynomial_fit(self): - with fail_any_deprecation_warnings(): - with add_examples_to_path(): - import polynomial_fit - with show_replaced_by_check_graphic(self): - polynomial_fit.main() - - -if __name__ == '__main__': - tests.main() diff --git a/docs/iris/example_tests/test_projections_and_annotations.py b/docs/iris/example_tests/test_projections_and_annotations.py deleted file mode 100644 index adfcc45026..0000000000 --- a/docs/iris/example_tests/test_projections_and_annotations.py +++ /dev/null @@ -1,41 +0,0 @@ -# (C) British Crown Copyright 2014 - 2016, Met Office -# -# This file is part of Iris. -# -# Iris is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the -# Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Iris is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with Iris. If not, see <http://www.gnu.org/licenses/>. - -from __future__ import (absolute_import, division, print_function) -from six.moves import (filter, input, map, range, zip) # noqa - -# Import Iris tests first so that some things can be initialised before -# importing anything else. 
-import iris.tests as tests - -from .extest_util import (add_examples_to_path, - show_replaced_by_check_graphic, - fail_any_deprecation_warnings) - - -class TestProjectionsAndAnnotations(tests.GraphicsTest): - """Test the projections_and_annotations example code.""" - def test_projections_and_annotations(self): - with fail_any_deprecation_warnings(): - with add_examples_to_path(): - import projections_and_annotations - with show_replaced_by_check_graphic(self): - projections_and_annotations.main() - - -if __name__ == '__main__': - tests.main() diff --git a/docs/iris/example_tests/test_rotated_pole_mapping.py b/docs/iris/example_tests/test_rotated_pole_mapping.py deleted file mode 100644 index 672ba50434..0000000000 --- a/docs/iris/example_tests/test_rotated_pole_mapping.py +++ /dev/null @@ -1,41 +0,0 @@ -# (C) British Crown Copyright 2010 - 2016, Met Office -# -# This file is part of Iris. -# -# Iris is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the -# Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Iris is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with Iris. If not, see <http://www.gnu.org/licenses/>. - -from __future__ import (absolute_import, division, print_function) -from six.moves import (filter, input, map, range, zip) # noqa - -# Import Iris tests first so that some things can be initialised before -# importing anything else. -import iris.tests as tests - -from .extest_util import (add_examples_to_path, - show_replaced_by_check_graphic, - fail_any_deprecation_warnings) - - -class TestRotatedPoleMapping(tests.GraphicsTest): - """Test the rotated_pole_mapping example code.""" - def test_rotated_pole_mapping(self): - with fail_any_deprecation_warnings(): - with add_examples_to_path(): - import rotated_pole_mapping - with show_replaced_by_check_graphic(self): - rotated_pole_mapping.main() - - -if __name__ == '__main__': - tests.main() diff --git a/docs/iris/example_tests/test_wind_speed.py b/docs/iris/example_tests/test_wind_speed.py deleted file mode 100644 index dacf146c91..0000000000 --- a/docs/iris/example_tests/test_wind_speed.py +++ /dev/null @@ -1,41 +0,0 @@ -# (C) British Crown Copyright 2010 - 2016, Met Office -# -# This file is part of Iris. -# -# Iris is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the -# Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Iris is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with Iris. If not, see <http://www.gnu.org/licenses/>. - -from __future__ import (absolute_import, division, print_function) -from six.moves import (filter, input, map, range, zip) # noqa - -# Import Iris tests first so that some things can be initialised before -# importing anything else. 
-import iris.tests as tests - -from .extest_util import (add_examples_to_path, - show_replaced_by_check_graphic, - fail_any_deprecation_warnings) - - -class TestWindSpeed(tests.GraphicsTest): - """Test the wind_speed example code.""" - def test_wind_speed(self): - with fail_any_deprecation_warnings(): - with add_examples_to_path(): - import wind_speed - with show_replaced_by_check_graphic(self): - wind_speed.main() - - -if __name__ == '__main__': - tests.main() diff --git a/docs/iris/src/Makefile b/docs/iris/src/Makefile deleted file mode 100644 index 53d224874d..0000000000 --- a/docs/iris/src/Makefile +++ /dev/null @@ -1,133 +0,0 @@ -# Makefile for Sphinx documentation -# - -# You can set these variables from the command line. -SPHINXOPTS = -SPHINXBUILD = sphinx-build -PAPER = -BUILDDIR = ../build -SRCDIR = ./ - -# Internal variables. -PAPEROPT_a4 = -D latex_paper_size=a4 -PAPEROPT_letter = -D latex_paper_size=letter -ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . - -.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest - -help: - @echo "Please use \`make <target>' where <target> is one of" - @echo " html to make standalone HTML files" - @echo " dirhtml to make HTML files named index.html in directories" - @echo " singlehtml to make a single large HTML file" - @echo " pickle to make pickle files" - @echo " json to make JSON files" - @echo " htmlhelp to make HTML files and a HTML help project" - @echo " qthelp to make HTML files and a qthelp project" - @echo " devhelp to make HTML files and a Devhelp project" - @echo " epub to make an epub" - @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" - @echo " latexpdf to make LaTeX files and run them through pdflatex" - @echo " text to make text files" - @echo " man to make manual pages" - @echo " changes to make an overview of all changed/added/deprecated items" - @echo " linkcheck to check all external links for integrity" - @echo " doctest to run all doctests embedded in the documentation (if enabled)" - -clean: - -rm -rf $(BUILDDIR)/* - -rm -rf $(SRCDIR)/iris - -rm -rf $(SRCDIR)/examples $(SRCDIR)/_templates/gallery.html $(SRCDIR)/_static/random_image.js $(SRCDIR)/_static/random.js - -html: - $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html - @echo - @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." - -dirhtml: - $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml - @echo - @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." - -singlehtml: - $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml - @echo - @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." - -pickle: - $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle - @echo - @echo "Build finished; now you can process the pickle files." - -json: - $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json - @echo - @echo "Build finished; now you can process the JSON files." - -htmlhelp: - $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp - @echo - @echo "Build finished; now you can run HTML Help Workshop with the" \ - ".hhp project file in $(BUILDDIR)/htmlhelp." 
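# (Illustrative usage, assuming only the variables declared at the top of this
# Makefile: each target here wraps a single sphinx-build invocation, so a
# local docs build from this directory is e.g.
#
#     make clean
#     make html
#
# and SPHINXOPTS / SPHINXBUILD can be overridden on the command line, for
# example `make html SPHINXOPTS="-W"` to turn Sphinx warnings into errors.)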
- -qthelp: - $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp - @echo - @echo "Build finished; now you can run "qcollectiongenerator" with the" \ - ".qhcp project file in $(BUILDDIR)/qthelp, like this:" - @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/Iris.qhcp" - @echo "To view the help file:" - @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/Iris.qhc" - -devhelp: - $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp - @echo - @echo "Build finished." - @echo "To view the help file:" - @echo "# mkdir -p $$HOME/.local/share/devhelp/Iris" - @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/Iris" - @echo "# devhelp" - -epub: - $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub - @echo - @echo "Build finished. The epub file is in $(BUILDDIR)/epub." - -latex: - $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex - @echo - @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." - @echo "Run \`make' in that directory to run these through (pdf)latex" \ - "(use \`make latexpdf' here to do that automatically)." - -latexpdf: latex - $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex - @echo "Running LaTeX files through pdflatex..." - make -C $(BUILDDIR)/latex all-pdf - @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." - -text: - $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text - @echo - @echo "Build finished. The text files are in $(BUILDDIR)/text." - -man: - $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man - @echo - @echo "Build finished. The manual pages are in $(BUILDDIR)/man." - -changes: - $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes - @echo - @echo "The overview file is in $(BUILDDIR)/changes." - -linkcheck: - $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck - @echo - @echo "Link check complete; look for any errors in the above output " \ - "or in $(BUILDDIR)/linkcheck/output.txt." - -doctest: - $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest - @echo "Testing of doctests in the sources finished, look at the " \ - "results in $(BUILDDIR)/doctest/output.txt." diff --git a/docs/iris/src/_static/Iris7_1_trim_100.png b/docs/iris/src/_static/Iris7_1_trim_100.png deleted file mode 100644 index 330ee6e95d..0000000000 Binary files a/docs/iris/src/_static/Iris7_1_trim_100.png and /dev/null differ diff --git a/docs/iris/src/_static/Iris7_1_trim_full.png b/docs/iris/src/_static/Iris7_1_trim_full.png deleted file mode 100644 index ac219de136..0000000000 Binary files a/docs/iris/src/_static/Iris7_1_trim_full.png and /dev/null differ diff --git a/docs/iris/src/_static/copybutton.js b/docs/iris/src/_static/copybutton.js deleted file mode 100644 index 6800c3cb93..0000000000 --- a/docs/iris/src/_static/copybutton.js +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2013 PSF. Licensed under the PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2 -// File originates from the cpython source found in Doc/tools/sphinxext/static/copybutton.js - -$(document).ready(function() { - /* Add a [>>>] button on the top-right corner of code samples to hide - * the >>> and ... prompts and the output and thus make the code - * copyable. 
*/ - var div = $('.highlight-python .highlight,' + - '.highlight-python3 .highlight') - var pre = div.find('pre'); - - // get the styles from the current theme - pre.parent().parent().css('position', 'relative'); - var hide_text = 'Hide the prompts and output'; - var show_text = 'Show the prompts and output'; - var border_width = pre.css('border-top-width'); - var border_style = pre.css('border-top-style'); - var border_color = pre.css('border-top-color'); - var button_styles = { - 'cursor':'pointer', 'position': 'absolute', 'top': '0', 'right': '0', - 'border-color': border_color, 'border-style': border_style, - 'border-width': border_width, 'color': border_color, 'text-size': '75%', - 'font-family': 'monospace', 'padding-left': '0.2em', 'padding-right': '0.2em' - } - - // create and add the button to all the code blocks that contain >>> - div.each(function(index) { - var jthis = $(this); - if (jthis.find('.gp').length > 0) { - var button = $('<span class="copybutton">&gt;&gt;&gt;</span>'); - button.css(button_styles) - button.attr('title', hide_text); - jthis.prepend(button); - } - // tracebacks (.gt) contain bare text elements that need to be - // wrapped in a span to work with .nextUntil() (see later) - jthis.find('pre:has(.gt)').contents().filter(function() { - return ((this.nodeType == 3) && (this.data.trim().length > 0)); - }).wrap('<span>'); - }); - - // define the behavior of the button when it's clicked - $('.copybutton').toggle( - function() { - var button = $(this); - button.parent().find('.go, .gp, .gt').hide(); - button.next('pre').find('.gt').nextUntil('.gp, .go').css('visibility', 'hidden'); - button.css('text-decoration', 'line-through'); - button.attr('title', show_text); - }, - function() { - var button = $(this); - button.parent().find('.go, .gp, .gt').show(); - button.next('pre').find('.gt').nextUntil('.gp, .go').css('visibility', 'visible'); - button.css('text-decoration', 'none'); - button.attr('title', hide_text); - }); -}); - diff --git a/docs/iris/src/_static/favicon-16x16.png b/docs/iris/src/_static/favicon-16x16.png deleted file mode 100644 index ea64d21a55..0000000000 Binary files a/docs/iris/src/_static/favicon-16x16.png and /dev/null differ diff --git a/docs/iris/src/_static/favicon-32x32.png b/docs/iris/src/_static/favicon-32x32.png deleted file mode 100644 index 9270dd6a99..0000000000 Binary files a/docs/iris/src/_static/favicon-32x32.png and /dev/null differ diff --git a/docs/iris/src/_static/jquery.cycle.all.latest.js b/docs/iris/src/_static/jquery.cycle.all.latest.js deleted file mode 100644 index 75d7ab98f8..0000000000 --- a/docs/iris/src/_static/jquery.cycle.all.latest.js +++ /dev/null @@ -1,1331 +0,0 @@ -/*! - * jQuery Cycle Plugin (with Transition Definitions) - * Examples and documentation at: http://jquery.malsup.com/cycle/ - * Copyright (c) 2007-2010 M. Alsup - * Version: 2.88 (08-JUN-2010) - * Dual licensed under the MIT and GPL licenses. - * http://jquery.malsup.com/license.html - * Requires: jQuery v1.2.6 or later - */ -;(function($) { - -var ver = '2.88'; - -// if $.support is not defined (pre jQuery 1.3) add what I need -if ($.support == undefined) { - $.support = { - opacity: !($.browser.msie) - }; -} - -function debug(s) { - if ($.fn.cycle.debug) - log(s); -} -function log() { - if (window.console && window.console.log) - window.console.log('[cycle] ' + Array.prototype.join.call(arguments,' ')); -}; - -// the options arg can be... 
-// a number - indicates an immediate transition should occur to the given slide index -// a string - 'pause', 'resume', 'toggle', 'next', 'prev', 'stop', 'destroy' or the name of a transition effect (ie, 'fade', 'zoom', etc) -// an object - properties to control the slideshow -// -// the arg2 arg can be... -// the name of an fx (only used in conjunction with a numeric value for 'options') -// the value true (only used in first arg == 'resume') and indicates -// that the resume should occur immediately (not wait for next timeout) - -$.fn.cycle = function(options, arg2) { - var o = { s: this.selector, c: this.context }; - - // in 1.3+ we can fix mistakes with the ready state - if (this.length === 0 && options != 'stop') { - if (!$.isReady && o.s) { - log('DOM not ready, queuing slideshow'); - $(function() { - $(o.s,o.c).cycle(options,arg2); - }); - return this; - } - // is your DOM ready? http://docs.jquery.com/Tutorials:Introducing_$(document).ready() - log('terminating; zero elements found by selector' + ($.isReady ? '' : ' (DOM not ready)')); - return this; - } - - // iterate the matched nodeset - return this.each(function() { - var opts = handleArguments(this, options, arg2); - if (opts === false) - return; - - opts.updateActivePagerLink = opts.updateActivePagerLink || $.fn.cycle.updateActivePagerLink; - - // stop existing slideshow for this container (if there is one) - if (this.cycleTimeout) - clearTimeout(this.cycleTimeout); - this.cycleTimeout = this.cyclePause = 0; - - var $cont = $(this); - var $slides = opts.slideExpr ? $(opts.slideExpr, this) : $cont.children(); - var els = $slides.get(); - if (els.length < 2) { - log('terminating; too few slides: ' + els.length); - return; - } - - var opts2 = buildOptions($cont, $slides, els, opts, o); - if (opts2 === false) - return; - - var startTime = opts2.continuous ? 10 : getTimeout(els[opts2.currSlide], els[opts2.nextSlide], opts2, !opts2.rev); - - // if it's an auto slideshow, kick it off - if (startTime) { - startTime += (opts2.delay || 0); - if (startTime < 10) - startTime = 10; - debug('first timeout: ' + startTime); - this.cycleTimeout = setTimeout(function(){go(els,opts2,0,(!opts2.rev && !opts.backwards))}, startTime); - } - }); -}; - -// process the args that were passed to the plugin fn -function handleArguments(cont, options, arg2) { - if (cont.cycleStop == undefined) - cont.cycleStop = 0; - if (options === undefined || options === null) - options = {}; - if (options.constructor == String) { - switch(options) { - case 'destroy': - case 'stop': - var opts = $(cont).data('cycle.opts'); - if (!opts) - return false; - cont.cycleStop++; // callbacks look for change - if (cont.cycleTimeout) - clearTimeout(cont.cycleTimeout); - cont.cycleTimeout = 0; - $(cont).removeData('cycle.opts'); - if (options == 'destroy') - destroy(opts); - return false; - case 'toggle': - cont.cyclePause = (cont.cyclePause === 1) ? 
0 : 1; - checkInstantResume(cont.cyclePause, arg2, cont); - return false; - case 'pause': - cont.cyclePause = 1; - return false; - case 'resume': - cont.cyclePause = 0; - checkInstantResume(false, arg2, cont); - return false; - case 'prev': - case 'next': - var opts = $(cont).data('cycle.opts'); - if (!opts) { - log('options not found, "prev/next" ignored'); - return false; - } - $.fn.cycle[options](opts); - return false; - default: - options = { fx: options }; - }; - return options; - } - else if (options.constructor == Number) { - // go to the requested slide - var num = options; - options = $(cont).data('cycle.opts'); - if (!options) { - log('options not found, can not advance slide'); - return false; - } - if (num < 0 || num >= options.elements.length) { - log('invalid slide index: ' + num); - return false; - } - options.nextSlide = num; - if (cont.cycleTimeout) { - clearTimeout(cont.cycleTimeout); - cont.cycleTimeout = 0; - } - if (typeof arg2 == 'string') - options.oneTimeFx = arg2; - go(options.elements, options, 1, num >= options.currSlide); - return false; - } - return options; - - function checkInstantResume(isPaused, arg2, cont) { - if (!isPaused && arg2 === true) { // resume now! - var options = $(cont).data('cycle.opts'); - if (!options) { - log('options not found, can not resume'); - return false; - } - if (cont.cycleTimeout) { - clearTimeout(cont.cycleTimeout); - cont.cycleTimeout = 0; - } - go(options.elements, options, 1, (!opts.rev && !opts.backwards)); - } - } -}; - -function removeFilter(el, opts) { - if (!$.support.opacity && opts.cleartype && el.style.filter) { - try { el.style.removeAttribute('filter'); } - catch(smother) {} // handle old opera versions - } -}; - -// unbind event handlers -function destroy(opts) { - if (opts.next) - $(opts.next).unbind(opts.prevNextEvent); - if (opts.prev) - $(opts.prev).unbind(opts.prevNextEvent); - - if (opts.pager || opts.pagerAnchorBuilder) - $.each(opts.pagerAnchors || [], function() { - this.unbind().remove(); - }); - opts.pagerAnchors = null; - if (opts.destroy) // callback - opts.destroy(opts); -}; - -// one-time initialization -function buildOptions($cont, $slides, els, options, o) { - // support metadata plugin (v1.0 and v2.0) - var opts = $.extend({}, $.fn.cycle.defaults, options || {}, $.metadata ? $cont.metadata() : $.meta ? $cont.data() : {}); - if (opts.autostop) - opts.countdown = opts.autostopCount || els.length; - - var cont = $cont[0]; - $cont.data('cycle.opts', opts); - opts.$cont = $cont; - opts.stopCount = cont.cycleStop; - opts.elements = els; - opts.before = opts.before ? [opts.before] : []; - opts.after = opts.after ? 
[opts.after] : []; - opts.after.unshift(function(){ opts.busy=0; }); - - // push some after callbacks - if (!$.support.opacity && opts.cleartype) - opts.after.push(function() { removeFilter(this, opts); }); - if (opts.continuous) - opts.after.push(function() { go(els,opts,0,(!opts.rev && !opts.backwards)); }); - - saveOriginalOpts(opts); - - // clearType corrections - if (!$.support.opacity && opts.cleartype && !opts.cleartypeNoBg) - clearTypeFix($slides); - - // container requires non-static position so that slides can be position within - if ($cont.css('position') == 'static') - $cont.css('position', 'relative'); - if (opts.width) - $cont.width(opts.width); - if (opts.height && opts.height != 'auto') - $cont.height(opts.height); - - if (opts.startingSlide) - opts.startingSlide = parseInt(opts.startingSlide); - else if (opts.backwards) - opts.startingSlide = els.length - 1; - - // if random, mix up the slide array - if (opts.random) { - opts.randomMap = []; - for (var i = 0; i < els.length; i++) - opts.randomMap.push(i); - opts.randomMap.sort(function(a,b) {return Math.random() - 0.5;}); - opts.randomIndex = 1; - opts.startingSlide = opts.randomMap[1]; - } - else if (opts.startingSlide >= els.length) - opts.startingSlide = 0; // catch bogus input - opts.currSlide = opts.startingSlide || 0; - var first = opts.startingSlide; - - // set position and zIndex on all the slides - $slides.css({position: 'absolute', top:0, left:0}).hide().each(function(i) { - var z; - if (opts.backwards) - z = first ? i <= first ? els.length + (i-first) : first-i : els.length-i; - else - z = first ? i >= first ? els.length - (i-first) : first-i : els.length-i; - $(this).css('z-index', z) - }); - - // make sure first slide is visible - $(els[first]).css('opacity',1).show(); // opacity bit needed to handle restart use case - removeFilter(els[first], opts); - - // stretch slides - if (opts.fit && opts.width) - $slides.width(opts.width); - if (opts.fit && opts.height && opts.height != 'auto') - $slides.height(opts.height); - - // stretch container - var reshape = opts.containerResize && !$cont.innerHeight(); - if (reshape) { // do this only if container has no size http://tinyurl.com/da2oa9 - var maxw = 0, maxh = 0; - for(var j=0; j < els.length; j++) { - var $e = $(els[j]), e = $e[0], w = $e.outerWidth(), h = $e.outerHeight(); - if (!w) w = e.offsetWidth || e.width || $e.attr('width') - if (!h) h = e.offsetHeight || e.height || $e.attr('height'); - maxw = w > maxw ? w : maxw; - maxh = h > maxh ? h : maxh; - } - if (maxw > 0 && maxh > 0) - $cont.css({width:maxw+'px',height:maxh+'px'}); - } - - if (opts.pause) - $cont.hover(function(){this.cyclePause++;},function(){this.cyclePause--;}); - - if (supportMultiTransitions(opts) === false) - return false; - - // apparently a lot of people use image slideshows without height/width attributes on the images. - // Cycle 2.50+ requires the sizing info for every slide; this block tries to deal with that. - var requeue = false; - options.requeueAttempts = options.requeueAttempts || 0; - $slides.each(function() { - // try to get height/width of each slide - var $el = $(this); - this.cycleH = (opts.fit && opts.height) ? opts.height : ($el.height() || this.offsetHeight || this.height || $el.attr('height') || 0); - this.cycleW = (opts.fit && opts.width) ? opts.width : ($el.width() || this.offsetWidth || this.width || $el.attr('width') || 0); - - if ( $el.is('img') ) { - // sigh.. sniffing, hacking, shrugging... 
this crappy hack tries to account for what browsers do when - // an image is being downloaded and the markup did not include sizing info (height/width attributes); - // there seems to be some "default" sizes used in this situation - var loadingIE = ($.browser.msie && this.cycleW == 28 && this.cycleH == 30 && !this.complete); - var loadingFF = ($.browser.mozilla && this.cycleW == 34 && this.cycleH == 19 && !this.complete); - var loadingOp = ($.browser.opera && ((this.cycleW == 42 && this.cycleH == 19) || (this.cycleW == 37 && this.cycleH == 17)) && !this.complete); - var loadingOther = (this.cycleH == 0 && this.cycleW == 0 && !this.complete); - // don't requeue for images that are still loading but have a valid size - if (loadingIE || loadingFF || loadingOp || loadingOther) { - if (o.s && opts.requeueOnImageNotLoaded && ++options.requeueAttempts < 100) { // track retry count so we don't loop forever - log(options.requeueAttempts,' - img slide not loaded, requeuing slideshow: ', this.src, this.cycleW, this.cycleH); - setTimeout(function() {$(o.s,o.c).cycle(options)}, opts.requeueTimeout); - requeue = true; - return false; // break each loop - } - else { - log('could not determine size of image: '+this.src, this.cycleW, this.cycleH); - } - } - } - return true; - }); - - if (requeue) - return false; - - opts.cssBefore = opts.cssBefore || {}; - opts.animIn = opts.animIn || {}; - opts.animOut = opts.animOut || {}; - - $slides.not(':eq('+first+')').css(opts.cssBefore); - if (opts.cssFirst) - $($slides[first]).css(opts.cssFirst); - - if (opts.timeout) { - opts.timeout = parseInt(opts.timeout); - // ensure that timeout and speed settings are sane - if (opts.speed.constructor == String) - opts.speed = $.fx.speeds[opts.speed] || parseInt(opts.speed); - if (!opts.sync) - opts.speed = opts.speed / 2; - - var buffer = opts.fx == 'shuffle' ? 500 : 250; - while((opts.timeout - opts.speed) < buffer) // sanitize timeout - opts.timeout += opts.speed; - } - if (opts.easing) - opts.easeIn = opts.easeOut = opts.easing; - if (!opts.speedIn) - opts.speedIn = opts.speed; - if (!opts.speedOut) - opts.speedOut = opts.speed; - - opts.slideCount = els.length; - opts.currSlide = opts.lastSlide = first; - if (opts.random) { - if (++opts.randomIndex == els.length) - opts.randomIndex = 0; - opts.nextSlide = opts.randomMap[opts.randomIndex]; - } - else if (opts.backwards) - opts.nextSlide = opts.startingSlide == 0 ? (els.length-1) : opts.startingSlide-1; - else - opts.nextSlide = opts.startingSlide >= (els.length-1) ? 
0 : opts.startingSlide+1; - - // run transition init fn - if (!opts.multiFx) { - var init = $.fn.cycle.transitions[opts.fx]; - if ($.isFunction(init)) - init($cont, $slides, opts); - else if (opts.fx != 'custom' && !opts.multiFx) { - log('unknown transition: ' + opts.fx,'; slideshow terminating'); - return false; - } - } - - // fire artificial events - var e0 = $slides[first]; - if (opts.before.length) - opts.before[0].apply(e0, [e0, e0, opts, true]); - if (opts.after.length > 1) - opts.after[1].apply(e0, [e0, e0, opts, true]); - - if (opts.next) - $(opts.next).bind(opts.prevNextEvent,function(){return advance(opts,opts.rev?-1:1)}); - if (opts.prev) - $(opts.prev).bind(opts.prevNextEvent,function(){return advance(opts,opts.rev?1:-1)}); - if (opts.pager || opts.pagerAnchorBuilder) - buildPager(els,opts); - - exposeAddSlide(opts, els); - - return opts; -}; - -// save off original opts so we can restore after clearing state -function saveOriginalOpts(opts) { - opts.original = { before: [], after: [] }; - opts.original.cssBefore = $.extend({}, opts.cssBefore); - opts.original.cssAfter = $.extend({}, opts.cssAfter); - opts.original.animIn = $.extend({}, opts.animIn); - opts.original.animOut = $.extend({}, opts.animOut); - $.each(opts.before, function() { opts.original.before.push(this); }); - $.each(opts.after, function() { opts.original.after.push(this); }); -}; - -function supportMultiTransitions(opts) { - var i, tx, txs = $.fn.cycle.transitions; - // look for multiple effects - if (opts.fx.indexOf(',') > 0) { - opts.multiFx = true; - opts.fxs = opts.fx.replace(/\s*/g,'').split(','); - // discard any bogus effect names - for (i=0; i < opts.fxs.length; i++) { - var fx = opts.fxs[i]; - tx = txs[fx]; - if (!tx || !txs.hasOwnProperty(fx) || !$.isFunction(tx)) { - log('discarding unknown transition: ',fx); - opts.fxs.splice(i,1); - i--; - } - } - // if we have an empty list then we threw everything away! - if (!opts.fxs.length) { - log('No valid transitions named; slideshow terminating.'); - return false; - } - } - else if (opts.fx == 'all') { // auto-gen the list of transitions - opts.multiFx = true; - opts.fxs = []; - for (p in txs) { - tx = txs[p]; - if (txs.hasOwnProperty(p) && $.isFunction(tx)) - opts.fxs.push(p); - } - } - if (opts.multiFx && opts.randomizeEffects) { - // munge the fxs array to make effect selection random - var r1 = Math.floor(Math.random() * 20) + 30; - for (i = 0; i < r1; i++) { - var r2 = Math.floor(Math.random() * opts.fxs.length); - opts.fxs.push(opts.fxs.splice(r2,1)[0]); - } - debug('randomized fx sequence: ',opts.fxs); - } - return true; -}; - -// provide a mechanism for adding slides after the slideshow has started -function exposeAddSlide(opts, els) { - opts.addSlide = function(newSlide, prepend) { - var $s = $(newSlide), s = $s[0]; - if (!opts.autostopCount) - opts.countdown++; - els[prepend?'unshift':'push'](s); - if (opts.els) - opts.els[prepend?'unshift':'push'](s); // shuffle needs this - opts.slideCount = els.length; - - $s.css('position','absolute'); - $s[prepend?'prependTo':'appendTo'](opts.$cont); - - if (prepend) { - opts.currSlide++; - opts.nextSlide++; - } - - if (!$.support.opacity && opts.cleartype && !opts.cleartypeNoBg) - clearTypeFix($s); - - if (opts.fit && opts.width) - $s.width(opts.width); - if (opts.fit && opts.height && opts.height != 'auto') - $slides.height(opts.height); - s.cycleH = (opts.fit && opts.height) ? opts.height : $s.height(); - s.cycleW = (opts.fit && opts.width) ? 
opts.width : $s.width(); - - $s.css(opts.cssBefore); - - if (opts.pager || opts.pagerAnchorBuilder) - $.fn.cycle.createPagerAnchor(els.length-1, s, $(opts.pager), els, opts); - - if ($.isFunction(opts.onAddSlide)) - opts.onAddSlide($s); - else - $s.hide(); // default behavior - }; -} - -// reset internal state; we do this on every pass in order to support multiple effects -$.fn.cycle.resetState = function(opts, fx) { - fx = fx || opts.fx; - opts.before = []; opts.after = []; - opts.cssBefore = $.extend({}, opts.original.cssBefore); - opts.cssAfter = $.extend({}, opts.original.cssAfter); - opts.animIn = $.extend({}, opts.original.animIn); - opts.animOut = $.extend({}, opts.original.animOut); - opts.fxFn = null; - $.each(opts.original.before, function() { opts.before.push(this); }); - $.each(opts.original.after, function() { opts.after.push(this); }); - - // re-init - var init = $.fn.cycle.transitions[fx]; - if ($.isFunction(init)) - init(opts.$cont, $(opts.elements), opts); -}; - -// this is the main engine fn, it handles the timeouts, callbacks and slide index mgmt -function go(els, opts, manual, fwd) { - // opts.busy is true if we're in the middle of an animation - if (manual && opts.busy && opts.manualTrump) { - // let manual transitions requests trump active ones - debug('manualTrump in go(), stopping active transition'); - $(els).stop(true,true); - opts.busy = false; - } - // don't begin another timeout-based transition if there is one active - if (opts.busy) { - debug('transition active, ignoring new tx request'); - return; - } - - var p = opts.$cont[0], curr = els[opts.currSlide], next = els[opts.nextSlide]; - - // stop cycling if we have an outstanding stop request - if (p.cycleStop != opts.stopCount || p.cycleTimeout === 0 && !manual) - return; - - // check to see if we should stop cycling based on autostop options - if (!manual && !p.cyclePause && !opts.bounce && - ((opts.autostop && (--opts.countdown <= 0)) || - (opts.nowrap && !opts.random && opts.nextSlide < opts.currSlide))) { - if (opts.end) - opts.end(opts); - return; - } - - // if slideshow is paused, only transition on a manual trigger - var changed = false; - if ((manual || !p.cyclePause) && (opts.nextSlide != opts.currSlide)) { - changed = true; - var fx = opts.fx; - // keep trying to get the slide size if we don't have it yet - curr.cycleH = curr.cycleH || $(curr).height(); - curr.cycleW = curr.cycleW || $(curr).width(); - next.cycleH = next.cycleH || $(next).height(); - next.cycleW = next.cycleW || $(next).width(); - - // support multiple transition types - if (opts.multiFx) { - if (opts.lastFx == undefined || ++opts.lastFx >= opts.fxs.length) - opts.lastFx = 0; - fx = opts.fxs[opts.lastFx]; - opts.currFx = fx; - } - - // one-time fx overrides apply to: $('div').cycle(3,'zoom'); - if (opts.oneTimeFx) { - fx = opts.oneTimeFx; - opts.oneTimeFx = null; - } - - $.fn.cycle.resetState(opts, fx); - - // run the before callbacks - if (opts.before.length) - $.each(opts.before, function(i,o) { - if (p.cycleStop != opts.stopCount) return; - o.apply(next, [curr, next, opts, fwd]); - }); - - // stage the after callacks - var after = function() { - $.each(opts.after, function(i,o) { - if (p.cycleStop != opts.stopCount) return; - o.apply(next, [curr, next, opts, fwd]); - }); - }; - - debug('tx firing; currSlide: ' + opts.currSlide + '; nextSlide: ' + opts.nextSlide); - - // get ready to perform the transition - opts.busy = 1; - if (opts.fxFn) // fx function provided? 
- opts.fxFn(curr, next, opts, after, fwd, manual && opts.fastOnEvent); - else if ($.isFunction($.fn.cycle[opts.fx])) // fx plugin ? - $.fn.cycle[opts.fx](curr, next, opts, after, fwd, manual && opts.fastOnEvent); - else - $.fn.cycle.custom(curr, next, opts, after, fwd, manual && opts.fastOnEvent); - } - - if (changed || opts.nextSlide == opts.currSlide) { - // calculate the next slide - opts.lastSlide = opts.currSlide; - if (opts.random) { - opts.currSlide = opts.nextSlide; - if (++opts.randomIndex == els.length) - opts.randomIndex = 0; - opts.nextSlide = opts.randomMap[opts.randomIndex]; - if (opts.nextSlide == opts.currSlide) - opts.nextSlide = (opts.currSlide == opts.slideCount - 1) ? 0 : opts.currSlide + 1; - } - else if (opts.backwards) { - var roll = (opts.nextSlide - 1) < 0; - if (roll && opts.bounce) { - opts.backwards = !opts.backwards; - opts.nextSlide = 1; - opts.currSlide = 0; - } - else { - opts.nextSlide = roll ? (els.length-1) : opts.nextSlide-1; - opts.currSlide = roll ? 0 : opts.nextSlide+1; - } - } - else { // sequence - var roll = (opts.nextSlide + 1) == els.length; - if (roll && opts.bounce) { - opts.backwards = !opts.backwards; - opts.nextSlide = els.length-2; - opts.currSlide = els.length-1; - } - else { - opts.nextSlide = roll ? 0 : opts.nextSlide+1; - opts.currSlide = roll ? els.length-1 : opts.nextSlide-1; - } - } - } - if (changed && opts.pager) - opts.updateActivePagerLink(opts.pager, opts.currSlide, opts.activePagerClass); - - // stage the next transition - var ms = 0; - if (opts.timeout && !opts.continuous) - ms = getTimeout(els[opts.currSlide], els[opts.nextSlide], opts, fwd); - else if (opts.continuous && p.cyclePause) // continuous shows work off an after callback, not this timer logic - ms = 10; - if (ms > 0) - p.cycleTimeout = setTimeout(function(){ go(els, opts, 0, (!opts.rev && !opts.backwards)) }, ms); -}; - -// invoked after transition -$.fn.cycle.updateActivePagerLink = function(pager, currSlide, clsName) { - $(pager).each(function() { - $(this).children().removeClass(clsName).eq(currSlide).addClass(clsName); - }); -}; - -// calculate timeout value for current transition -function getTimeout(curr, next, opts, fwd) { - if (opts.timeoutFn) { - // call user provided calc fn - var t = opts.timeoutFn.call(curr,curr,next,opts,fwd); - while ((t - opts.speed) < 250) // sanitize timeout - t += opts.speed; - debug('calculated timeout: ' + t + '; speed: ' + opts.speed); - if (t !== false) - return t; - } - return opts.timeout; -}; - -// expose next/prev function, caller must pass in state -$.fn.cycle.next = function(opts) { advance(opts, opts.rev?-1:1); }; -$.fn.cycle.prev = function(opts) { advance(opts, opts.rev?1:-1);}; - -// advance slide forward or back -function advance(opts, val) { - var els = opts.elements; - var p = opts.$cont[0], timeout = p.cycleTimeout; - if (timeout) { - clearTimeout(timeout); - p.cycleTimeout = 0; - } - if (opts.random && val < 0) { - // move back to the previously display slide - opts.randomIndex--; - if (--opts.randomIndex == -2) - opts.randomIndex = els.length-2; - else if (opts.randomIndex == -1) - opts.randomIndex = els.length-1; - opts.nextSlide = opts.randomMap[opts.randomIndex]; - } - else if (opts.random) { - opts.nextSlide = opts.randomMap[opts.randomIndex]; - } - else { - opts.nextSlide = opts.currSlide + val; - if (opts.nextSlide < 0) { - if (opts.nowrap) return false; - opts.nextSlide = els.length - 1; - } - else if (opts.nextSlide >= els.length) { - if (opts.nowrap) return false; - opts.nextSlide = 0; - } - } - - var 
cb = opts.onPrevNextEvent || opts.prevNextClick; // prevNextClick is deprecated - if ($.isFunction(cb)) - cb(val > 0, opts.nextSlide, els[opts.nextSlide]); - go(els, opts, 1, val>=0); - return false; -}; - -function buildPager(els, opts) { - var $p = $(opts.pager); - $.each(els, function(i,o) { - $.fn.cycle.createPagerAnchor(i,o,$p,els,opts); - }); - opts.updateActivePagerLink(opts.pager, opts.startingSlide, opts.activePagerClass); -}; - -$.fn.cycle.createPagerAnchor = function(i, el, $p, els, opts) { - var a; - if ($.isFunction(opts.pagerAnchorBuilder)) { - a = opts.pagerAnchorBuilder(i,el); - debug('pagerAnchorBuilder('+i+', el) returned: ' + a); - } - else - a = '<a href="#">'+(i+1)+'</a>'; - - if (!a) - return; - var $a = $(a); - // don't reparent if anchor is in the dom - if ($a.parents('body').length === 0) { - var arr = []; - if ($p.length > 1) { - $p.each(function() { - var $clone = $a.clone(true); - $(this).append($clone); - arr.push($clone[0]); - }); - $a = $(arr); - } - else { - $a.appendTo($p); - } - } - - opts.pagerAnchors = opts.pagerAnchors || []; - opts.pagerAnchors.push($a); - $a.bind(opts.pagerEvent, function(e) { - e.preventDefault(); - opts.nextSlide = i; - var p = opts.$cont[0], timeout = p.cycleTimeout; - if (timeout) { - clearTimeout(timeout); - p.cycleTimeout = 0; - } - var cb = opts.onPagerEvent || opts.pagerClick; // pagerClick is deprecated - if ($.isFunction(cb)) - cb(opts.nextSlide, els[opts.nextSlide]); - go(els,opts,1,opts.currSlide < i); // trigger the trans -// return false; // <== allow bubble - }); - - if ( ! /^click/.test(opts.pagerEvent) && !opts.allowPagerClickBubble) - $a.bind('click.cycle', function(){return false;}); // suppress click - - if (opts.pauseOnPagerHover) - $a.hover(function() { opts.$cont[0].cyclePause++; }, function() { opts.$cont[0].cyclePause--; } ); -}; - -// helper fn to calculate the number of slides between the current and the next -$.fn.cycle.hopsFromLast = function(opts, fwd) { - var hops, l = opts.lastSlide, c = opts.currSlide; - if (fwd) - hops = c > l ? c - l : opts.slideCount - l; - else - hops = c < l ? l - c : l + opts.slideCount - c; - return hops; -}; - -// fix clearType problems in ie6 by setting an explicit bg color -// (otherwise text slides look horrible during a fade transition) -function clearTypeFix($slides) { - debug('applying clearType background-color hack'); - function hex(s) { - s = parseInt(s).toString(16); - return s.length < 2 ? '0'+s : s; - }; - function getBg(e) { - for ( ; e && e.nodeName.toLowerCase() != 'html'; e = e.parentNode) { - var v = $.css(e,'background-color'); - if (v.indexOf('rgb') >= 0 ) { - var rgb = v.match(/\d+/g); - return '#'+ hex(rgb[0]) + hex(rgb[1]) + hex(rgb[2]); - } - if (v && v != 'transparent') - return v; - } - return '#ffffff'; - }; - $slides.each(function() { $(this).css('background-color', getBg(this)); }); -}; - -// reset common props before the next transition -$.fn.cycle.commonReset = function(curr,next,opts,w,h,rev) { - $(opts.elements).not(curr).hide(); - opts.cssBefore.opacity = 1; - opts.cssBefore.display = 'block'; - if (w !== false && next.cycleW > 0) - opts.cssBefore.width = next.cycleW; - if (h !== false && next.cycleH > 0) - opts.cssBefore.height = next.cycleH; - opts.cssAfter = opts.cssAfter || {}; - opts.cssAfter.display = 'none'; - $(curr).css('zIndex',opts.slideCount + (rev === true ? 1 : 0)); - $(next).css('zIndex',opts.slideCount + (rev === true ? 
0 : 1)); -}; - -// the actual fn for effecting a transition -$.fn.cycle.custom = function(curr, next, opts, cb, fwd, speedOverride) { - var $l = $(curr), $n = $(next); - var speedIn = opts.speedIn, speedOut = opts.speedOut, easeIn = opts.easeIn, easeOut = opts.easeOut; - $n.css(opts.cssBefore); - if (speedOverride) { - if (typeof speedOverride == 'number') - speedIn = speedOut = speedOverride; - else - speedIn = speedOut = 1; - easeIn = easeOut = null; - } - var fn = function() {$n.animate(opts.animIn, speedIn, easeIn, cb)}; - $l.animate(opts.animOut, speedOut, easeOut, function() { - if (opts.cssAfter) $l.css(opts.cssAfter); - if (!opts.sync) fn(); - }); - if (opts.sync) fn(); -}; - -// transition definitions - only fade is defined here, transition pack defines the rest -$.fn.cycle.transitions = { - fade: function($cont, $slides, opts) { - $slides.not(':eq('+opts.currSlide+')').css('opacity',0); - opts.before.push(function(curr,next,opts) { - $.fn.cycle.commonReset(curr,next,opts); - opts.cssBefore.opacity = 0; - }); - opts.animIn = { opacity: 1 }; - opts.animOut = { opacity: 0 }; - opts.cssBefore = { top: 0, left: 0 }; - } -}; - -$.fn.cycle.ver = function() { return ver; }; - -// override these globally if you like (they are all optional) -$.fn.cycle.defaults = { - fx: 'fade', // name of transition effect (or comma separated names, ex: 'fade,scrollUp,shuffle') - timeout: 4000, // milliseconds between slide transitions (0 to disable auto advance) - timeoutFn: null, // callback for determining per-slide timeout value: function(currSlideElement, nextSlideElement, options, forwardFlag) - continuous: 0, // true to start next transition immediately after current one completes - speed: 1000, // speed of the transition (any valid fx speed value) - speedIn: null, // speed of the 'in' transition - speedOut: null, // speed of the 'out' transition - next: null, // selector for element to use as event trigger for next slide - prev: null, // selector for element to use as event trigger for previous slide -// prevNextClick: null, // @deprecated; please use onPrevNextEvent instead - onPrevNextEvent: null, // callback fn for prev/next events: function(isNext, zeroBasedSlideIndex, slideElement) - prevNextEvent:'click.cycle',// event which drives the manual transition to the previous or next slide - pager: null, // selector for element to use as pager container - //pagerClick null, // @deprecated; please use onPagerEvent instead - onPagerEvent: null, // callback fn for pager events: function(zeroBasedSlideIndex, slideElement) - pagerEvent: 'click.cycle', // name of event which drives the pager navigation - allowPagerClickBubble: false, // allows or prevents click event on pager anchors from bubbling - pagerAnchorBuilder: null, // callback fn for building anchor links: function(index, DOMelement) - before: null, // transition callback (scope set to element to be shown): function(currSlideElement, nextSlideElement, options, forwardFlag) - after: null, // transition callback (scope set to element that was shown): function(currSlideElement, nextSlideElement, options, forwardFlag) - end: null, // callback invoked when the slideshow terminates (use with autostop or nowrap options): function(options) - easing: null, // easing method for both in and out transitions - easeIn: null, // easing for "in" transition - easeOut: null, // easing for "out" transition - shuffle: null, // coords for shuffle animation, ex: { top:15, left: 200 } - animIn: null, // properties that define how the slide animates in - animOut: 
null, // properties that define how the slide animates out - cssBefore: null, // properties that define the initial state of the slide before transitioning in - cssAfter: null, // properties that defined the state of the slide after transitioning out - fxFn: null, // function used to control the transition: function(currSlideElement, nextSlideElement, options, afterCalback, forwardFlag) - height: 'auto', // container height - startingSlide: 0, // zero-based index of the first slide to be displayed - sync: 1, // true if in/out transitions should occur simultaneously - random: 0, // true for random, false for sequence (not applicable to shuffle fx) - fit: 0, // force slides to fit container - containerResize: 1, // resize container to fit largest slide - pause: 0, // true to enable "pause on hover" - pauseOnPagerHover: 0, // true to pause when hovering over pager link - autostop: 0, // true to end slideshow after X transitions (where X == slide count) - autostopCount: 0, // number of transitions (optionally used with autostop to define X) - delay: 0, // additional delay (in ms) for first transition (hint: can be negative) - slideExpr: null, // expression for selecting slides (if something other than all children is required) - cleartype: !$.support.opacity, // true if clearType corrections should be applied (for IE) - cleartypeNoBg: false, // set to true to disable extra cleartype fixing (leave false to force background color setting on slides) - nowrap: 0, // true to prevent slideshow from wrapping - fastOnEvent: 0, // force fast transitions when triggered manually (via pager or prev/next); value == time in ms - randomizeEffects: 1, // valid when multiple effects are used; true to make the effect sequence random - rev: 0, // causes animations to transition in reverse - manualTrump: true, // causes manual transition to stop an active transition instead of being ignored - requeueOnImageNotLoaded: true, // requeue the slideshow if any image slides are not yet loaded - requeueTimeout: 250, // ms delay for requeue - activePagerClass: 'activeSlide', // class name used for the active pager link - updateActivePagerLink: null, // callback fn invoked to update the active pager link (adds/removes activePagerClass style) - backwards: false // true to start slideshow at last slide and move backwards through the stack -}; - -})(jQuery); - - -/*! - * jQuery Cycle Plugin Transition Definitions - * This script is a plugin for the jQuery Cycle Plugin - * Examples and documentation at: http://malsup.com/jquery/cycle/ - * Copyright (c) 2007-2010 M. Alsup - * Version: 2.72 - * Dual licensed under the MIT and GPL licenses: - * http://www.opensource.org/licenses/mit-license.php - * http://www.gnu.org/licenses/gpl.html - */ -(function($) { - -// -// These functions define one-time slide initialization for the named -// transitions. To save file size feel free to remove any of these that you -// don't need. 
-// -$.fn.cycle.transitions.none = function($cont, $slides, opts) { - opts.fxFn = function(curr,next,opts,after){ - $(next).show(); - $(curr).hide(); - after(); - }; -} - -// scrollUp/Down/Left/Right -$.fn.cycle.transitions.scrollUp = function($cont, $slides, opts) { - $cont.css('overflow','hidden'); - opts.before.push($.fn.cycle.commonReset); - var h = $cont.height(); - opts.cssBefore ={ top: h, left: 0 }; - opts.cssFirst = { top: 0 }; - opts.animIn = { top: 0 }; - opts.animOut = { top: -h }; -}; -$.fn.cycle.transitions.scrollDown = function($cont, $slides, opts) { - $cont.css('overflow','hidden'); - opts.before.push($.fn.cycle.commonReset); - var h = $cont.height(); - opts.cssFirst = { top: 0 }; - opts.cssBefore= { top: -h, left: 0 }; - opts.animIn = { top: 0 }; - opts.animOut = { top: h }; -}; -$.fn.cycle.transitions.scrollLeft = function($cont, $slides, opts) { - $cont.css('overflow','hidden'); - opts.before.push($.fn.cycle.commonReset); - var w = $cont.width(); - opts.cssFirst = { left: 0 }; - opts.cssBefore= { left: w, top: 0 }; - opts.animIn = { left: 0 }; - opts.animOut = { left: 0-w }; -}; -$.fn.cycle.transitions.scrollRight = function($cont, $slides, opts) { - $cont.css('overflow','hidden'); - opts.before.push($.fn.cycle.commonReset); - var w = $cont.width(); - opts.cssFirst = { left: 0 }; - opts.cssBefore= { left: -w, top: 0 }; - opts.animIn = { left: 0 }; - opts.animOut = { left: w }; -}; -$.fn.cycle.transitions.scrollHorz = function($cont, $slides, opts) { - $cont.css('overflow','hidden').width(); - opts.before.push(function(curr, next, opts, fwd) { - $.fn.cycle.commonReset(curr,next,opts); - opts.cssBefore.left = fwd ? (next.cycleW-1) : (1-next.cycleW); - opts.animOut.left = fwd ? -curr.cycleW : curr.cycleW; - }); - opts.cssFirst = { left: 0 }; - opts.cssBefore= { top: 0 }; - opts.animIn = { left: 0 }; - opts.animOut = { top: 0 }; -}; -$.fn.cycle.transitions.scrollVert = function($cont, $slides, opts) { - $cont.css('overflow','hidden'); - opts.before.push(function(curr, next, opts, fwd) { - $.fn.cycle.commonReset(curr,next,opts); - opts.cssBefore.top = fwd ? (1-next.cycleH) : (next.cycleH-1); - opts.animOut.top = fwd ? curr.cycleH : -curr.cycleH; - }); - opts.cssFirst = { top: 0 }; - opts.cssBefore= { left: 0 }; - opts.animIn = { top: 0 }; - opts.animOut = { left: 0 }; -}; - -// slideX/slideY -$.fn.cycle.transitions.slideX = function($cont, $slides, opts) { - opts.before.push(function(curr, next, opts) { - $(opts.elements).not(curr).hide(); - $.fn.cycle.commonReset(curr,next,opts,false,true); - opts.animIn.width = next.cycleW; - }); - opts.cssBefore = { left: 0, top: 0, width: 0 }; - opts.animIn = { width: 'show' }; - opts.animOut = { width: 0 }; -}; -$.fn.cycle.transitions.slideY = function($cont, $slides, opts) { - opts.before.push(function(curr, next, opts) { - $(opts.elements).not(curr).hide(); - $.fn.cycle.commonReset(curr,next,opts,true,false); - opts.animIn.height = next.cycleH; - }); - opts.cssBefore = { left: 0, top: 0, height: 0 }; - opts.animIn = { height: 'show' }; - opts.animOut = { height: 0 }; -}; - -// shuffle -$.fn.cycle.transitions.shuffle = function($cont, $slides, opts) { - var i, w = $cont.css('overflow', 'visible').width(); - $slides.css({left: 0, top: 0}); - opts.before.push(function(curr,next,opts) { - $.fn.cycle.commonReset(curr,next,opts,true,true,true); - }); - // only adjust speed once! 
- if (!opts.speedAdjusted) { - opts.speed = opts.speed / 2; // shuffle has 2 transitions - opts.speedAdjusted = true; - } - opts.random = 0; - opts.shuffle = opts.shuffle || {left:-w, top:15}; - opts.els = []; - for (i=0; i < $slides.length; i++) - opts.els.push($slides[i]); - - for (i=0; i < opts.currSlide; i++) - opts.els.push(opts.els.shift()); - - // custom transition fn (hat tip to Benjamin Sterling for this bit of sweetness!) - opts.fxFn = function(curr, next, opts, cb, fwd) { - var $el = fwd ? $(curr) : $(next); - $(next).css(opts.cssBefore); - var count = opts.slideCount; - $el.animate(opts.shuffle, opts.speedIn, opts.easeIn, function() { - var hops = $.fn.cycle.hopsFromLast(opts, fwd); - for (var k=0; k < hops; k++) - fwd ? opts.els.push(opts.els.shift()) : opts.els.unshift(opts.els.pop()); - if (fwd) { - for (var i=0, len=opts.els.length; i < len; i++) - $(opts.els[i]).css('z-index', len-i+count); - } - else { - var z = $(curr).css('z-index'); - $el.css('z-index', parseInt(z)+1+count); - } - $el.animate({left:0, top:0}, opts.speedOut, opts.easeOut, function() { - $(fwd ? this : curr).hide(); - if (cb) cb(); - }); - }); - }; - opts.cssBefore = { display: 'block', opacity: 1, top: 0, left: 0 }; -}; - -// turnUp/Down/Left/Right -$.fn.cycle.transitions.turnUp = function($cont, $slides, opts) { - opts.before.push(function(curr, next, opts) { - $.fn.cycle.commonReset(curr,next,opts,true,false); - opts.cssBefore.top = next.cycleH; - opts.animIn.height = next.cycleH; - }); - opts.cssFirst = { top: 0 }; - opts.cssBefore = { left: 0, height: 0 }; - opts.animIn = { top: 0 }; - opts.animOut = { height: 0 }; -}; -$.fn.cycle.transitions.turnDown = function($cont, $slides, opts) { - opts.before.push(function(curr, next, opts) { - $.fn.cycle.commonReset(curr,next,opts,true,false); - opts.animIn.height = next.cycleH; - opts.animOut.top = curr.cycleH; - }); - opts.cssFirst = { top: 0 }; - opts.cssBefore = { left: 0, top: 0, height: 0 }; - opts.animOut = { height: 0 }; -}; -$.fn.cycle.transitions.turnLeft = function($cont, $slides, opts) { - opts.before.push(function(curr, next, opts) { - $.fn.cycle.commonReset(curr,next,opts,false,true); - opts.cssBefore.left = next.cycleW; - opts.animIn.width = next.cycleW; - }); - opts.cssBefore = { top: 0, width: 0 }; - opts.animIn = { left: 0 }; - opts.animOut = { width: 0 }; -}; -$.fn.cycle.transitions.turnRight = function($cont, $slides, opts) { - opts.before.push(function(curr, next, opts) { - $.fn.cycle.commonReset(curr,next,opts,false,true); - opts.animIn.width = next.cycleW; - opts.animOut.left = curr.cycleW; - }); - opts.cssBefore = { top: 0, left: 0, width: 0 }; - opts.animIn = { left: 0 }; - opts.animOut = { width: 0 }; -}; - -// zoom -$.fn.cycle.transitions.zoom = function($cont, $slides, opts) { - opts.before.push(function(curr, next, opts) { - $.fn.cycle.commonReset(curr,next,opts,false,false,true); - opts.cssBefore.top = next.cycleH/2; - opts.cssBefore.left = next.cycleW/2; - opts.animIn = { top: 0, left: 0, width: next.cycleW, height: next.cycleH }; - opts.animOut = { width: 0, height: 0, top: curr.cycleH/2, left: curr.cycleW/2 }; - }); - opts.cssFirst = { top:0, left: 0 }; - opts.cssBefore = { width: 0, height: 0 }; -}; - -// fadeZoom -$.fn.cycle.transitions.fadeZoom = function($cont, $slides, opts) { - opts.before.push(function(curr, next, opts) { - $.fn.cycle.commonReset(curr,next,opts,false,false); - opts.cssBefore.left = next.cycleW/2; - opts.cssBefore.top = next.cycleH/2; - opts.animIn = { top: 0, left: 0, width: next.cycleW, height: 
next.cycleH }; - }); - opts.cssBefore = { width: 0, height: 0 }; - opts.animOut = { opacity: 0 }; -}; - -// blindX -$.fn.cycle.transitions.blindX = function($cont, $slides, opts) { - var w = $cont.css('overflow','hidden').width(); - opts.before.push(function(curr, next, opts) { - $.fn.cycle.commonReset(curr,next,opts); - opts.animIn.width = next.cycleW; - opts.animOut.left = curr.cycleW; - }); - opts.cssBefore = { left: w, top: 0 }; - opts.animIn = { left: 0 }; - opts.animOut = { left: w }; -}; -// blindY -$.fn.cycle.transitions.blindY = function($cont, $slides, opts) { - var h = $cont.css('overflow','hidden').height(); - opts.before.push(function(curr, next, opts) { - $.fn.cycle.commonReset(curr,next,opts); - opts.animIn.height = next.cycleH; - opts.animOut.top = curr.cycleH; - }); - opts.cssBefore = { top: h, left: 0 }; - opts.animIn = { top: 0 }; - opts.animOut = { top: h }; -}; -// blindZ -$.fn.cycle.transitions.blindZ = function($cont, $slides, opts) { - var h = $cont.css('overflow','hidden').height(); - var w = $cont.width(); - opts.before.push(function(curr, next, opts) { - $.fn.cycle.commonReset(curr,next,opts); - opts.animIn.height = next.cycleH; - opts.animOut.top = curr.cycleH; - }); - opts.cssBefore = { top: h, left: w }; - opts.animIn = { top: 0, left: 0 }; - opts.animOut = { top: h, left: w }; -}; - -// growX - grow horizontally from centered 0 width -$.fn.cycle.transitions.growX = function($cont, $slides, opts) { - opts.before.push(function(curr, next, opts) { - $.fn.cycle.commonReset(curr,next,opts,false,true); - opts.cssBefore.left = this.cycleW/2; - opts.animIn = { left: 0, width: this.cycleW }; - opts.animOut = { left: 0 }; - }); - opts.cssBefore = { width: 0, top: 0 }; -}; -// growY - grow vertically from centered 0 height -$.fn.cycle.transitions.growY = function($cont, $slides, opts) { - opts.before.push(function(curr, next, opts) { - $.fn.cycle.commonReset(curr,next,opts,true,false); - opts.cssBefore.top = this.cycleH/2; - opts.animIn = { top: 0, height: this.cycleH }; - opts.animOut = { top: 0 }; - }); - opts.cssBefore = { height: 0, left: 0 }; -}; - -// curtainX - squeeze in both edges horizontally -$.fn.cycle.transitions.curtainX = function($cont, $slides, opts) { - opts.before.push(function(curr, next, opts) { - $.fn.cycle.commonReset(curr,next,opts,false,true,true); - opts.cssBefore.left = next.cycleW/2; - opts.animIn = { left: 0, width: this.cycleW }; - opts.animOut = { left: curr.cycleW/2, width: 0 }; - }); - opts.cssBefore = { top: 0, width: 0 }; -}; -// curtainY - squeeze in both edges vertically -$.fn.cycle.transitions.curtainY = function($cont, $slides, opts) { - opts.before.push(function(curr, next, opts) { - $.fn.cycle.commonReset(curr,next,opts,true,false,true); - opts.cssBefore.top = next.cycleH/2; - opts.animIn = { top: 0, height: next.cycleH }; - opts.animOut = { top: curr.cycleH/2, height: 0 }; - }); - opts.cssBefore = { left: 0, height: 0 }; -}; - -// cover - curr slide covered by next slide -$.fn.cycle.transitions.cover = function($cont, $slides, opts) { - var d = opts.direction || 'left'; - var w = $cont.css('overflow','hidden').width(); - var h = $cont.height(); - opts.before.push(function(curr, next, opts) { - $.fn.cycle.commonReset(curr,next,opts); - if (d == 'right') - opts.cssBefore.left = -w; - else if (d == 'up') - opts.cssBefore.top = h; - else if (d == 'down') - opts.cssBefore.top = -h; - else - opts.cssBefore.left = w; - }); - opts.animIn = { left: 0, top: 0}; - opts.animOut = { opacity: 1 }; - opts.cssBefore = { top: 0, left: 0 }; -}; - 
-// uncover - curr slide moves off next slide -$.fn.cycle.transitions.uncover = function($cont, $slides, opts) { - var d = opts.direction || 'left'; - var w = $cont.css('overflow','hidden').width(); - var h = $cont.height(); - opts.before.push(function(curr, next, opts) { - $.fn.cycle.commonReset(curr,next,opts,true,true,true); - if (d == 'right') - opts.animOut.left = w; - else if (d == 'up') - opts.animOut.top = -h; - else if (d == 'down') - opts.animOut.top = h; - else - opts.animOut.left = -w; - }); - opts.animIn = { left: 0, top: 0 }; - opts.animOut = { opacity: 1 }; - opts.cssBefore = { top: 0, left: 0 }; -}; - -// toss - move top slide and fade away -$.fn.cycle.transitions.toss = function($cont, $slides, opts) { - var w = $cont.css('overflow','visible').width(); - var h = $cont.height(); - opts.before.push(function(curr, next, opts) { - $.fn.cycle.commonReset(curr,next,opts,true,true,true); - // provide default toss settings if animOut not provided - if (!opts.animOut.left && !opts.animOut.top) - opts.animOut = { left: w*2, top: -h/2, opacity: 0 }; - else - opts.animOut.opacity = 0; - }); - opts.cssBefore = { left: 0, top: 0 }; - opts.animIn = { left: 0 }; -}; - -// wipe - clip animation -$.fn.cycle.transitions.wipe = function($cont, $slides, opts) { - var w = $cont.css('overflow','hidden').width(); - var h = $cont.height(); - opts.cssBefore = opts.cssBefore || {}; - var clip; - if (opts.clip) { - if (/l2r/.test(opts.clip)) - clip = 'rect(0px 0px '+h+'px 0px)'; - else if (/r2l/.test(opts.clip)) - clip = 'rect(0px '+w+'px '+h+'px '+w+'px)'; - else if (/t2b/.test(opts.clip)) - clip = 'rect(0px '+w+'px 0px 0px)'; - else if (/b2t/.test(opts.clip)) - clip = 'rect('+h+'px '+w+'px '+h+'px 0px)'; - else if (/zoom/.test(opts.clip)) { - var top = parseInt(h/2); - var left = parseInt(w/2); - clip = 'rect('+top+'px '+left+'px '+top+'px '+left+'px)'; - } - } - - opts.cssBefore.clip = opts.cssBefore.clip || clip || 'rect(0px 0px 0px 0px)'; - - var d = opts.cssBefore.clip.match(/(\d+)/g); - var t = parseInt(d[0]), r = parseInt(d[1]), b = parseInt(d[2]), l = parseInt(d[3]); - - opts.before.push(function(curr, next, opts) { - if (curr == next) return; - var $curr = $(curr), $next = $(next); - $.fn.cycle.commonReset(curr,next,opts,true,true,false); - opts.cssAfter.display = 'block'; - - var step = 1, count = parseInt((opts.speedIn / 13)) - 1; - (function f() { - var tt = t ? t - parseInt(step * (t/count)) : 0; - var ll = l ? l - parseInt(step * (l/count)) : 0; - var bb = b < h ? b + parseInt(step * ((h-b)/count || 1)) : h; - var rr = r < w ? r + parseInt(step * ((w-r)/count || 1)) : w; - $next.css({ clip: 'rect('+tt+'px '+rr+'px '+bb+'px '+ll+'px)' }); - (step++ <= count) ? 
setTimeout(f, 13) : $curr.css('display', 'none'); - })(); - }); - opts.cssBefore = { display: 'block', opacity: 1, top: 0, left: 0 }; - opts.animIn = { left: 0 }; - opts.animOut = { left: 0 }; -}; - -})(jQuery); diff --git a/docs/iris/src/_static/logo_banner.png b/docs/iris/src/_static/logo_banner.png deleted file mode 100644 index 4bec22f5dc..0000000000 Binary files a/docs/iris/src/_static/logo_banner.png and /dev/null differ diff --git a/docs/iris/src/_static/style.css b/docs/iris/src/_static/style.css deleted file mode 100644 index 69fa84394e..0000000000 --- a/docs/iris/src/_static/style.css +++ /dev/null @@ -1,99 +0,0 @@ -body { - font-family: 'Noto Sans', sans-serif; -} - -.sidebar { z-index: 10; } - -.highlight { background: none; } - -p.hr_p { - overflow: hidden; - text-align: center; -} -p.hr_p a { - font-size: small; - color: #1C86EE; -} -p.hr_p:before, -p.hr_p:after { - background-color: #abc; - border: 1px solid #abc; - content: ""; - display: inline-block; - height: 1px; - position: relative; - vertical-align: middle; - width: 50%; -} -p.hr_p:before { - right: 0.5em; - margin-left: -50%; -} -p.hr_p:after { - left: 0.5em; - margin-right: -50%; -} - -.header-content { - background-color: white; - text-align: left; - padding: 0px; - height: 149px; -} - -.header-content img { - height: 100px; - vertical-align: middle; - float: left; - margin: 20px 2em 0.8em 4%; - padding: 0px; -} - -.header-content .strapline { - display: inline-block; - width: calc(100% - 110px - 2em - 4%); -} - -.strapline p { - font-size: medium; - font-family: 'Alike', serif; - font-weight: bold; - color: #444444; - max-width: 52ch; - margin-top: 0.25em; -} - -.header-content h1 { - font-size: 3.5rem; - font-family: 'Alike', serif; - margin-top: 40px; - padding: 0px; - color: #323232; - padding-bottom: 0.2em; -} - -.header-content h1 span.version { - font-size: 1.5rem; -} - -.github-forkme { - position: absolute; - top: 0; - right: 80px; - border: 0; -} - -/* Take into account the resizing effect of the page (which has a minimum */ -/* width of 740px + 80px margins). */ -@media screen and (max-width: calc(740px + 80px + 80px)) { - .github-forkme { - right: calc(100% - 740px - 80px); - } -} - -@media screen and (max-width: calc(740px + 80px)) { - .github-forkme { - left: calc(740px + 80px - 149px); - right: 0px; - } -} diff --git a/docs/iris/src/_templates/index.html b/docs/iris/src/_templates/index.html deleted file mode 100644 index c18f0268fa..0000000000 --- a/docs/iris/src/_templates/index.html +++ /dev/null @@ -1,146 +0,0 @@ -{% extends "layout.html" %} -{% set title = 'Iris documentation homepage' %} -{% block extrahead %} -{{ super() }} - - - - - - - - -{% endblock %} - - - -{% block body %} - - - - -

- Iris implements a data model based on the CF conventions, giving you a powerful, format-agnostic interface for working with your data. It excels when working with multi-dimensional Earth Science data, where tabular representations become unwieldy and inefficient.
-
- CF Standard names, units, and coordinate metadata are built into Iris, giving you a rich and expressive interface for maintaining an accurate representation of your data. Its treatment of data and associated metadata as first-class objects includes:
-
-   • a visualisation interface based on matplotlib and cartopy,
-   • unit conversion,
-   • subsetting and extraction,
-   • merge and concatenate,
-   • aggregations and reductions (including min, max, mean and weighted averages),
-   • interpolation and regridding (including nearest-neighbour, linear and area-weighted), and
-   • operator overloads (+, -, *, /, etc.).
-
- A number of file formats are recognised by Iris, including CF-compliant NetCDF, GRIB, and PP, and it has a plugin architecture to allow other formats to be added seamlessly.
-
- Building upon NumPy and dask, Iris scales from efficient single-machine workflows right through to multi-core clusters and HPC. Interoperability with packages from the wider scientific Python ecosystem comes from Iris' use of standard NumPy/dask arrays as its underlying data storage.
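For illustration (an editor's sketch, not part of the deleted page), the capabilities listed above correspond to public Iris API calls along these lines; the filename, variable name and constraint values are hypothetical:

    import iris
    import iris.analysis

    # Load a single cube from a (hypothetical) CF-NetCDF file.
    cube = iris.load_cube("air_temp.nc", "air_temperature")

    # Unit conversion (in place).
    cube.convert_units("celsius")

    # Subsetting/extraction: keep only northern-hemisphere latitudes.
    subset = cube.extract(iris.Constraint(latitude=lambda cell: cell > 0))

    # Aggregation/reduction: collapse the longitude dimension to a zonal mean.
    zonal_mean = subset.collapsed("longitude", iris.analysis.MEAN)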
- -{% endblock %} diff --git a/docs/iris/src/_templates/layout.html b/docs/iris/src/_templates/layout.html deleted file mode 100644 index f854455f71..0000000000 --- a/docs/iris/src/_templates/layout.html +++ /dev/null @@ -1,71 +0,0 @@ -{% extends "!layout.html" %} - -{%- block extrahead %} -{{ super() }} - - - - - - - - - - -{% endblock %} - - -{% block rootrellink %} -
-   • home
-   • examples
-   • gallery
-   • contents
- {% endblock %}
-
- {% block relbar1 %}
- Fork Iris on GitHub
- Iris logo
- Iris v3.0
- A powerful, format-agnostic, community-driven Python library for analysing and visualising Earth science data.
    - -{{ super() }} -{% endblock %} - - - -{% block footer %} - - - - - -{% endblock %} diff --git a/docs/iris/src/conf.py b/docs/iris/src/conf.py deleted file mode 100644 index 6cdfe634c4..0000000000 --- a/docs/iris/src/conf.py +++ /dev/null @@ -1,332 +0,0 @@ -# (C) British Crown Copyright 2010 - 2018, Met Office -# -# This file is part of Iris. -# -# Iris is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the -# Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Iris is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with Iris. If not, see . - -from __future__ import (absolute_import, division, print_function) -from six.moves import (filter, input, map, range, zip) # noqa - -# -*- coding: utf-8 -*- -# -# Iris documentation build configuration file, created by -# sphinx-quickstart on Tue May 25 13:26:23 2010. -# -# This file is execfile()d with the current directory set to its containing dir. -# -# Note that not all possible configuration values are present in this -# autogenerated file. -# -# All configuration values have a default; values that are commented out -# serve to show the default. - -import datetime -import os -import sys - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -sys.path.append(os.path.abspath('sphinxext')) - -# add some sample files from the developers guide.. -sys.path.append(os.path.abspath(os.path.join('developers_guide'))) - - -# -- General configuration ----------------------------------------------------- - -# Temporary value for use by LaTeX and 'man' output. -# Deleted at the end of the module. -_authors = ('Iris developers') - -# If your documentation needs a minimal Sphinx version, state it here. -#needs_sphinx = '1.0' - -# Add any Sphinx extension module names here, as strings. They can be extensions -# coming with Sphinx (named 'sphinx.ext.*') or your custom ones. -extensions = ['sphinx.ext.autodoc', - 'sphinx.ext.autosummary', - 'sphinx.ext.coverage', - 'sphinx.ext.doctest', - 'sphinx.ext.extlinks', - 'sphinx.ext.graphviz', - 'sphinx.ext.imgmath', - 'sphinx.ext.intersphinx', - 'matplotlib.sphinxext.mathmpl', - 'matplotlib.sphinxext.only_directives', - 'matplotlib.sphinxext.plot_directive', - - # better class documentation - 'custom_class_autodoc', - - # Data instance __repr__ filter. - 'custom_data_autodoc', - - 'gen_example_directory', - 'generate_package_rst', - 'gen_gallery', - - # Add labels to figures automatically - 'auto_label_figures', - ] - -# list of packages to document -autopackage_name = ['iris'] - -# Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] - -# The suffix of source filenames. -source_suffix = '.rst' - -# The encoding of source files. -#source_encoding = 'utf-8-sig' - -# The master toctree document. -master_doc = 'contents' - -# General information about the project. -project = u'Iris' -# define the copyright information for latex builds. 
Note, for html builds, -# the copyright exists directly inside "_templates/layout.html" -upper_copy_year = datetime.datetime.now().year -copyright = u'British Crown Copyright 2010 - {}, Met Office'.format(upper_copy_year) - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -import iris -# The short X.Y version. -if iris.__version__ == 'dev': - version = 'dev' -else: - # major.feature(.minor)-dev -> major.minor - version = '.'.join(iris.__version__.split('-')[0].split('.')[:2]) -# The full version, including alpha/beta/rc tags. -release = iris.__version__ - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -#language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -#today = '' -# Else, today_fmt is used as the format for a strftime call. -#today_fmt = '%B %d, %Y' - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -exclude_patterns = ['sphinxext', 'build'] - -# The reST default role (used for this markup: `text`) to use for all documents. -#default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -#add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -#add_module_names = True - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -#show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' - -# Define the default highlight language. This also allows the >>> removal -# javascript (copybutton.js) to function. -highlight_language = 'default' - -# A list of ignored prefixes for module index sorting. -modindex_common_prefix = ['iris'] - -intersphinx_mapping = { - 'cartopy': ('http://scitools.org.uk/cartopy/docs/latest/', None), - 'iris-grib': ('http://iris-grib.readthedocs.io/en/latest/', None), - 'matplotlib': ('http://matplotlib.org/', None), - 'numpy': ('http://docs.scipy.org/doc/numpy/', None), - 'python': ('http://docs.python.org/2.7', None), - 'scipy': ('http://docs.scipy.org/doc/scipy/reference/', None), -} - -# -- Extlinks extension ------------------------------------------------------- - -extlinks = {'issue': ('https://github.com/SciTools/iris/issues/%s', - 'Issue #'), - 'pull': ('https://github.com/SciTools/iris/pull/%s', 'PR #'), - } - -# -- Doctest ------------------------------------------------------------------ - -doctest_global_setup = 'import iris' - -# -- Autodoc ------------------------------------------------------------------ - -autodoc_member_order = 'groupwise' -autodoc_default_flags = ['show-inheritance'] - -# include the __init__ method when documenting classes -# document the init/new method at the top level of the class documentation rather than displaying the class docstring -autoclass_content='init' - -# -- Options for HTML output --------------------------------------------------- - -# The theme to use for HTML and HTML Help pages. Major themes that come with -# Sphinx are currently 'default' and 'sphinxdoc'. -html_theme = 'default' -html_theme = 'sphinxdoc' - -# Theme options are theme-specific and customize the look and feel of a theme -# further. 
For a list of options available for each theme, see the -# documentation. -#html_theme_options = {} - -html_context = {'copyright_years': '2010 - {}'.format(upper_copy_year)} - -# Add any paths that contain custom themes here, relative to this directory. -html_theme_path = [] - -# The name for this set of Sphinx documents. If None, it defaults to -# " v documentation". -#html_title = None - -# A shorter title for the navigation bar. Default is the same as html_title. -#html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -#html_logo = None - -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -#html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] - -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. -#html_last_updated_fmt = '%b %d, %Y' - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -#html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -#html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -html_additional_pages = {'index': 'index.html', 'gallery':'gallery.html'} - -# If false, no module index is generated. -#html_domain_indices = True - -# If false, no index is generated. -html_use_index = True - -# If true, the index is split into individual pages for each letter. -#html_split_index = False - -# If true, links to the reST sources are added to the pages. -html_show_sourcelink = True - -# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -html_show_sphinx = False - -# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. -#html_show_copyright = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -#html_use_opensearch = '' - -# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). -#html_file_suffix = '' - -# Output file base name for HTML help builder. -htmlhelp_basename = 'Irisdoc' - -html_use_modindex = False - - -# -- Options for LaTeX output -------------------------------------------------- - -# The paper size ('letter' or 'a4'). -#latex_paper_size = 'letter' - -# The font size ('10pt', '11pt' or '12pt'). -#latex_font_size = '10pt' - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, author, documentclass [howto/manual]). -latex_documents = [ - ('contents', 'Iris.tex', u'Iris Documentation', ' \\and '.join(_authors), 'manual'), -] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -#latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -#latex_use_parts = False - -# If true, show page references after internal links. -#latex_show_pagerefs = False - -# If true, show URL addresses after external links. 
-#latex_show_urls = False - -# Additional stuff for the LaTeX preamble. -#latex_preamble = '' - -# Documents to append as an appendix to all manuals. -#latex_appendices = [] - -# If false, no module index is generated. -#latex_domain_indices = True -latex_elements = {} -latex_elements['docclass'] = 'MO_report' - -# -- Options for manual page output -------------------------------------------- - -# One entry per manual page. List of tuples -# (source start file, name, description, authors, manual section). -man_pages = [ - ('index', 'iris', u'Iris Documentation', _authors, 1) -] - -########################## -# plot directive options # -########################## - -plot_formats = [('png', 100), - #('hires.png', 200), ('pdf', 250) - ] - - - - - -# Delete the temporary value. -del _authors diff --git a/docs/iris/src/contents.rst b/docs/iris/src/contents.rst deleted file mode 100644 index ecaf025a7a..0000000000 --- a/docs/iris/src/contents.rst +++ /dev/null @@ -1,32 +0,0 @@ -===================================== -Iris documentation table of contents -===================================== -.. toctree:: - :maxdepth: 1 - - installing.rst - -.. toctree:: - :maxdepth: 3 - - userguide/index.rst - -.. toctree:: - :maxdepth: 1 - :hidden: - - iris/iris.rst - -.. toctree:: - :maxdepth: 2 - - whatsnew/index.rst - -.. toctree:: - :maxdepth: 1 - - examples/index.rst - developers_guide/index.rst - whitepapers/index.rst - copyright.rst - diff --git a/docs/iris/src/copyright.rst b/docs/iris/src/copyright.rst deleted file mode 100644 index ed611c5ba8..0000000000 --- a/docs/iris/src/copyright.rst +++ /dev/null @@ -1,51 +0,0 @@ -========================================== -Iris copyright, licensing and contributors -========================================== - -.. |copy| unicode:: U+000A9 .. COPYRIGHT SIGN - -Iris code ---------- - -All Iris source code, unless explicitly stated, is |copy| ``British Crown copyright, 2014`` and -is licensed under the **GNU Lesser General Public License** as published by the -Free Software Foundation, either version 3 of the License, or (at your option) any later version. -You should find all source files with the following header: - -.. admonition:: Code License - - |copy| British Crown Copyright 2010 - 2014, Met Office - - This file is part of Iris. - - Iris is free software: you can redistribute it and/or modify it under - the terms of the GNU Lesser General Public License as published by the - Free Software Foundation, either version 3 of the License, or - (at your option) any later version. - - Iris is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public License - along with Iris. If not, see ``_. - - -Iris documentation and examples -------------------------------- - -All documentation, examples and sample data found on this website and in source repository -are licensed under the UK's Open Government Licence: - -.. admonition:: Documentation, example and data license - - |copy| British Crown copyright, 2014. - - You may use and re-use the information featured on this website (not including logos) free of - charge in any format or medium, under the terms of the - `Open Government Licence `_. - We encourage users to establish hypertext links to this website. 
- - Any email enquiries regarding the use and re-use of this information resource should be - sent to: psi@nationalarchives.gsi.gov.uk. diff --git a/docs/iris/src/developers_guide/deprecations.rst b/docs/iris/src/developers_guide/deprecations.rst deleted file mode 100644 index c7a6888984..0000000000 --- a/docs/iris/src/developers_guide/deprecations.rst +++ /dev/null @@ -1,120 +0,0 @@ -.. _iris_development_deprecations: - -Deprecations -************ - -If you need to make a backwards-incompatible change to a public API -[#public-api]_ that has been included in a release (e.g. deleting a -method), then you must first deprecate the old behaviour in at least -one release, before removing/updating it in the next -`major release `_. - - -Adding a deprecation -==================== - -.. _removing-a-public-api: - -Removing a public API ---------------------- - -The simplest form of deprecation occurs when you need to remove a public -API. The public API in question is deprecated for a period before it is -removed to allow time for user code to be updated. Sometimes the -deprecation is accompanied by the introduction of a new public API. - -Under these circumstances the following points apply: - - - Using the deprecated API must result in a concise deprecation warning which - is an instance of :class:`iris.IrisDeprecation`. - It is easiest to call - :func:`iris._deprecation.warn_deprecated`, which is a - simple wrapper to :func:`warnings.warn` with the signature - `warn_deprecated(message, **kwargs)`. - - Where possible, your deprecation warning should include advice on - how to avoid using the deprecated API. For example, you might - reference a preferred API, or more detailed documentation elsewhere. - - You must update the docstring for the deprecated API to include a - Sphinx deprecation directive: - - :literal:`.. deprecated:: <VERSION>` - - where you should replace `<VERSION>` with the major and minor version - of Iris in which this API is first deprecated. For example: `1.8`. - - As with the deprecation warning, you should include advice on how to - avoid using the deprecated API within the content of this directive. - Feel free to include more detail in the updated docstring than in the - deprecation warning. - - You should check the documentation for references to the deprecated - API and update them as appropriate. - -Changing a default ------------------- - -When you need to change the default behaviour of a public API the -situation is slightly more complex. The recommended solution is to use -the :data:`iris.FUTURE` object. The :data:`iris.FUTURE` object provides -boolean attributes that allow user code to control at run-time the -default behaviour of corresponding public APIs. When a boolean attribute -is set to `False` it causes the corresponding public API to use its -deprecated default behaviour. When a boolean attribute is set to `True` -it causes the corresponding public API to use its new default behaviour. - -The following points apply in addition to those for removing a public -API: - - - You should add a new boolean attribute to :data:`iris.FUTURE` (by - modifying :class:`iris.Future`) that controls the default behaviour - of the public API that needs updating. The initial state of the new - boolean attribute should be `False`. You should name the new boolean - attribute to indicate that setting it to `True` will select the new - default behaviour. - - You should include a reference to this :data:`iris.FUTURE` flag in your - deprecation warning and corresponding Sphinx deprecation directive.
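To make the steps above concrete, here is a minimal editor's sketch of deprecating a public function (the names and version number are hypothetical; only ``warn_deprecated`` and the ``.. deprecated::`` directive come from this guide):

    from iris._deprecation import warn_deprecated

    def new_function(cube):
        """Do the thing the new, supported way."""
        pass

    def old_function(cube):
        """
        Do the thing the old way.

        .. deprecated:: 1.8
            Use :func:`new_function` instead.

        """
        # Emits an IrisDeprecation warning pointing users at the replacement.
        warn_deprecated('old_function() is deprecated and will be removed '
                        'in a future release; please use new_function() '
                        'instead.')
        return new_function(cube)

For a changed default, user code would then opt in to the new behaviour at run time by setting the corresponding (hypothetically named) flag, e.g. ``iris.FUTURE.use_new_default = True``.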
- - -Removing a deprecation -====================== - -When the time comes to make a new major release you should locate any -deprecated APIs within the code that satisfy the one release -minimum period described previously. Locating deprecated APIs can easily -be done by searching for the Sphinx deprecation directives and/or -deprecation warnings. - -Removing a public API ---------------------- - -The deprecated API should be removed and any corresponding documentation -and/or example code should be removed/updated as appropriate. - -.. _iris_developer_future: - -Changing a default ------------------- - - - You should update the initial state of the relevant boolean attribute - of :data:`iris.FUTURE` to `True`. - - You should deprecate setting the relevant boolean attribute of - :class:`iris.Future` in the same way as described in - :ref:`removing-a-public-api`. - - -.. rubric:: Footnotes - -.. [#public-api] A name without a leading underscore in any of its - components, with the exception of the :mod:`iris.experimental` and - :mod:`iris.tests` packages. - - Example public names are: - - `iris.this.` - - `iris.this.that` - - Example private names are: - - `iris._this` - - `iris.this._that` - - `iris._this.that` - - `iris._this._that` - - `iris.experimental.something` - - `iris.tests.get_data_path` diff --git a/docs/iris/src/developers_guide/documenting/__init__.py b/docs/iris/src/developers_guide/documenting/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/docs/iris/src/developers_guide/documenting/docstrings.rst b/docs/iris/src/developers_guide/documenting/docstrings.rst deleted file mode 100644 index 4499f3fe34..0000000000 --- a/docs/iris/src/developers_guide/documenting/docstrings.rst +++ /dev/null @@ -1,77 +0,0 @@ -================ - Docstrings -================ - - -Guiding principle: Every public object in the Iris package should have an appropriate docstring. - -This document has been influenced by the following PEPs: - * Attribute Docstrings `PEP-224 <http://www.python.org/dev/peps/pep-0224/>`_ - * Docstring Conventions `PEP-257 <http://www.python.org/dev/peps/pep-0257/>`_ - - -For consistency, always use ``"""triple double quotes"""`` around docstrings. Use ``r"""raw triple double quotes"""`` if you use any backslashes in your docstrings. For Unicode docstrings, use ``u"""Unicode triple-quoted string"""``. - -All docstrings should be written in rST (reStructuredText) markup; an rST guide follows this page. - -There are two forms of docstrings: **single-line** and **multi-line** docstrings. - - -Single-line docstrings -====================== -The single line docstring of an object must state the *purpose* of that object, known as the *purpose section*. This terse overview must be on one line and ideally no longer than 90 characters. - - -Multi-line docstrings -===================== -Multi-line docstrings must consist of at least a purpose section akin to the single-line docstring, followed by a blank line and then any other content, as described below. The entire docstring should be indented to the same level as the quotes at the docstring's first line. - - -Description ----------- -The multi-line docstring *description section* should expand on what was stated in the one line *purpose section*. The description section should try not to document *argument* and *keyword argument* details. Such information should be documented in the following *arguments and keywords section*. - - -Sample multi-line docstring ---------------------------- -Here is a simple example of a standard docstring: - - ..
literalinclude:: docstrings_sample_routine.py - -This would be rendered as: - - .. currentmodule:: documenting.docstrings_sample_routine - - .. automodule:: documenting.docstrings_sample_routine - :members: - :undoc-members: - -Additionally, a summary can be extracted automatically, which would result in: - - .. autosummary:: - - documenting.docstrings_sample_routine.sample_routine - - -Documenting classes -=================== -The class constructor should be documented in the docstring for its ``__init__`` or ``__new__`` method. Methods should be documented by their own docstring, not in the class header itself. - -If a class subclasses another class and its behavior is mostly inherited from that class, its docstring should mention this and summarise the differences. Use the verb "override" to indicate that a subclass method replaces a superclass method and does not call the superclass method; use the verb "extend" to indicate that a subclass method calls the superclass method (in addition to its own behavior). - - -Attribute and Property docstrings ---------------------------------- -Here is a simple example of a class containing an attribute docstring and a property docstring: - -.. literalinclude:: docstrings_attribute.py - -This would be rendered as: - - .. currentmodule:: documenting.docstrings_attribute - - .. automodule:: documenting.docstrings_attribute - :members: - :undoc-members: - -.. note:: The purpose section of the property docstring **must** state whether the property is read-only. diff --git a/docs/iris/src/developers_guide/documenting/docstrings_attribute.py b/docs/iris/src/developers_guide/documenting/docstrings_attribute.py deleted file mode 100644 index 24e4eec5d1..0000000000 --- a/docs/iris/src/developers_guide/documenting/docstrings_attribute.py +++ /dev/null @@ -1,37 +0,0 @@ -class ExampleClass(object): - """ - Class Summary - - """ - def __init__(self, arg1, arg2): - """ - Purpose section description. - - Description section text. - - Args: - - * arg1 (int): - First argument description. - * arg2 (float): - Second argument description. - - Returns: - Boolean. - - """ - self.a = arg1 - 'Attribute arg1 docstring.' - self.b = arg2 - 'Attribute arg2 docstring.' - - @property - def square(self): - """ - *(read-only)* Purpose section description. - - Returns: - int. - - """ - return self.a*self.a diff --git a/docs/iris/src/developers_guide/documenting/docstrings_sample_routine.py b/docs/iris/src/developers_guide/documenting/docstrings_sample_routine.py deleted file mode 100644 index 92eec42d90..0000000000 --- a/docs/iris/src/developers_guide/documenting/docstrings_sample_routine.py +++ /dev/null @@ -1,26 +0,0 @@ -def sample_routine(arg1, arg2, kwarg1='foo', kwarg2=None): - """ - Purpose section text goes here. - - Description section longer text goes here. - - Args: - - * arg1 (numpy.ndarray): - First argument description. - * arg2 (numpy.ndarray): - Second argument description. - - Kwargs: - - * kwarg1 (string): - The first keyword argument. This argument description - can be multi-lined. - * kwarg2 (Boolean or None): - The second keyword argument. - - Returns: - numpy.ndarray of arg1 * arg2 - - """ - pass diff --git a/docs/iris/src/developers_guide/documenting/index.rst b/docs/iris/src/developers_guide/documenting/index.rst deleted file mode 100644 index b30a16b2a6..0000000000 --- a/docs/iris/src/developers_guide/documenting/index.rst +++ /dev/null @@ -1,10 +0,0 @@ -======================= - Documentation in Iris -======================= - -.. 
toctree:: - :maxdepth: 2 - - docstrings.rst - rest_guide.rst - whats_new_contributions.rst diff --git a/docs/iris/src/developers_guide/documenting/rest_guide.rst b/docs/iris/src/developers_guide/documenting/rest_guide.rst deleted file mode 100644 index 8ce97a3c4a..0000000000 --- a/docs/iris/src/developers_guide/documenting/rest_guide.rst +++ /dev/null @@ -1,28 +0,0 @@ -=============== -reST quickstart -=============== - - -reST (http://en.wikipedia.org/wiki/ReStructuredText) is a lightweight markup language intended to be highly readable in source format. This guide will cover some of the more frequently used advanced reST markup syntaxes, for the basics of reST the following links may be useful: - - * http://sphinx.pocoo.org/rest.html - * http://docs.geoserver.org/trunk/en/docguide/sphinx.html - * http://packages.python.org/an_example_pypi_project/sphinx.html - -Reference documentation for reST can be found at http://docutils.sourceforge.net/rst.html. - -Creating links --------------- -Basic links can be created with ```Text of the link `_`` which will look like `Text of the link `_ - - -Documents in the same project can be cross referenced with the syntax ``:doc:`document_name``` for example, to reference the "docstrings" page ``:doc:`docstrings``` creates the following link :doc:`docstrings` - - -References can be created between sections by first making a "label" where you would like the link to point to ``.. _name_of_reference::`` the appropriate link can now be created with ``:ref:`name_of_reference``` (note the trailing underscore on the label) - - -Cross referencing other reference documentation can be achieved with the syntax ``:py:class:`zipfile.ZipFile``` which will result in links such as :py:class:`zipfile.ZipFile` and :py:class:`numpy.ndarray`. - - - diff --git a/docs/iris/src/developers_guide/documenting/whats_new_contributions.rst b/docs/iris/src/developers_guide/documenting/whats_new_contributions.rst deleted file mode 100644 index 203a422457..0000000000 --- a/docs/iris/src/developers_guide/documenting/whats_new_contributions.rst +++ /dev/null @@ -1,123 +0,0 @@ -.. _whats_new_contributions: - -================================= -Contributing a "What's New" entry -================================= - -Iris has an aggregator for building a draft what's new document for each -release. The draft what's new document is built from contributions by code authors. -This means contributions to the what's new document are written by the -developer most familiar with the change made. - -A contribution provides an entry in the what's new document, which describes a -change that improved Iris in some way. This change may be a new feature in Iris -or the fix for a bug introduced in a previous release. The contribution should -be included as part of the Iris Pull Request that introduces the change. - -When a new release is prepared, the what's new contributions are combined into -a draft what's new document for the release. - - -Writing a Contribution -====================== - -As introduced above, a contribution is the description of a change to Iris -which improved Iris in some way. As such, a single Iris Pull Request may -contain multiple changes that are worth highlighting as contributions to the -what's new document. - -Each contribution will ideally be written as a single concise bullet point. -The content of the bullet point should highlight the change that has been made -to Iris, targeting an Iris user as the audience. 
- -A contribution is a feature summary by the code author, which avoids the -release developer having to personally review the change in detail : -It is not in itself the final documentation content, -so it does not have to be perfect or complete in every respect. - - -Adding Contribution Files -========================= - -Each release must have a directory called ``contributions_``, -which should be created following the release of the current version of Iris. Each -release directory must be placed in ``docs/iris/src/whatsnew/``. -Contributions to the what's new must be written in markdown and placed into this -directory in text files. The filename for each item should be structured as follows: - -``__.txt`` - -Category --------- -The category must be one of the following: - -*newfeature* - Features that are new or changed to add functionality. -*bugfix* - A bugfix. -*incompatiblechange* - A change that causes an incompatibility with prior versions of Iris. -*deprecate* - Deprecations of functionality. -*docchange* - Changes to documentation. - -Date ----- - -The date must be a hyphen-separated date in the format of: - - * a four digit year, - * a three character month name, and - * a two digit day. - -For example: - - * 2012-Jan-30 - * 2014-May-03 - * 2015-Feb-19 - -Summary -------- - -The summary can be any remaining filename characters, and simply provides a -short identifying description of the change. - -For example: - - * whats-new-aggregator - * using_mo_pack - * correction-to-bilinear-regrid - * GRIB2_pdt11 - - -Complete Examples ------------------ - -Some sample what's new contribution filenames: - - * bugfix_2015-Aug-18_partial_pp_constraints.txt - * deprecate_2015-Nov-01_unit-module.txt - * incompatiblechange_2015-Oct-12_GRIB_optional_Python3_unavailable.txt - * newfeature_2015-Jul-03_pearsonr_rewrite.txt - -.. note:: - A test in the standard test suite ensures that all the contents of the - latest contributions directory conform to this naming scheme. - - -Compiling a Draft -================= - -Compiling a draft from the supplied contributions should be done when preparing -a release. Running ``docs/iris/src/whatsnew/aggregate_directory.py`` with the -release number as the argument will create a draft what's new with the name -``.rst`` file for the specified release, by aggregating the individual -contributions from the relevant folder. -Omitting the release number will build the latest version for which a -contributions folder is present. -This command fails if a file with the relevant name already exists. - -The resulting draft document is only a starting point, which the release -developer will then edit to produce the final 'What's new in Iris x.x' -documentation. diff --git a/docs/iris/src/developers_guide/gitwash/LICENSE b/docs/iris/src/developers_guide/gitwash/LICENSE deleted file mode 100644 index 0ea9a5957b..0000000000 --- a/docs/iris/src/developers_guide/gitwash/LICENSE +++ /dev/null @@ -1,34 +0,0 @@ -========= - LICENSE -========= - -We release the documents under the Creative Commons attribution license: -http://creativecommons.org/licenses/by/3.0/ - -We release the code under the simplified BSD license: - -Copyright (c) 2010, Matthew Brett -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - -* Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. 
-* Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS -IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED -TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A -PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED -TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - diff --git a/docs/iris/src/developers_guide/gitwash/branch_dropdown.png b/docs/iris/src/developers_guide/gitwash/branch_dropdown.png deleted file mode 100644 index 1bb7a57773..0000000000 Binary files a/docs/iris/src/developers_guide/gitwash/branch_dropdown.png and /dev/null differ diff --git a/docs/iris/src/developers_guide/gitwash/development_workflow.rst b/docs/iris/src/developers_guide/gitwash/development_workflow.rst deleted file mode 100644 index 4da6b700ba..0000000000 --- a/docs/iris/src/developers_guide/gitwash/development_workflow.rst +++ /dev/null @@ -1,421 +0,0 @@ -.. _development-workflow: - -#################### -Development workflow -#################### - -You already have your own forked copy of the `iris`_ repository, by -following :ref:`forking`. You have :ref:`set-up-fork`. You have configured -git by following :ref:`configure-git`. Now you are ready for some real work. - -Workflow summary -================ - -In what follows we'll refer to the upstream iris ``master`` branch, as -"trunk". - -* Don't use your ``master`` branch for anything. Consider deleting it. -* When you are starting a new set of changes, fetch any changes from trunk, - and start a new *feature branch* from that. -* Make a new branch for each separable set of changes |emdash| "one task, one - branch" (`ipython git workflow`_). -* Name your branch for the purpose of the changes - e.g. - ``bugfix-for-issue-14`` or ``refactor-database-code``. -* If you can possibly avoid it, avoid merging trunk or any other branches into - your feature branch while you are working. -* If you do find yourself merging from trunk, consider :ref:`rebase-on-trunk` -* Ask on the `iris mailing list`_ if you get stuck. -* Ask for code review! - -This way of working helps to keep work well organized, with readable history. -This in turn makes it easier for project maintainers (that might be you) to see -what you've done, and why you did it. - -See `linux git workflow`_ and `ipython git workflow`_ for some explanation. - -Consider deleting your master branch -==================================== - -It may sound strange, but deleting your own ``master`` branch can help reduce -confusion about which branch you are on. See `deleting master on github`_ for -details. - -.. _update-mirror-trunk: - -Update the mirror of trunk -========================== - -First make sure you have done :ref:`linking-to-upstream`. 
-
-From time to time you should fetch the upstream (trunk) changes from github::
-
-   git fetch upstream
-
-This will pull down any commits you don't have, and set the remote branches to
-point to the right commit. For example, 'trunk' is the branch referred to by
-(remote/branchname) ``upstream/master`` - and if there have been commits since
-you last checked, ``upstream/master`` will change after you do the fetch.
-
-.. _make-feature-branch:
-
-Make a new feature branch
-=========================
-
-When you are ready to make some changes to the code, you should start a new
-branch. Branches that are for a collection of related edits are often called
-'feature branches'.
-
-Making a new branch for each set of related changes will make it easier for
-someone reviewing your branch to see what you are doing.
-
-Choose an informative name for the branch to remind yourself and the rest of us
-what the changes in the branch are for. For example ``add-ability-to-fly``, or
-``bugfix-for-issue-42``.
-
-::
-
-   # Update the mirror of trunk
-   git fetch upstream
-   # Make new feature branch starting at current trunk
-   git branch my-new-feature upstream/master
-   git checkout my-new-feature
-
-Generally, you will want to keep your feature branches on your public github_
-fork of `iris`_. To do this, you `git push`_ this new branch up to your
-github repo. Generally (if you followed the instructions in these pages, and by
-default), git will have a link to your github repo, called ``origin``. You push
-up to your own repo on github with::
-
-   git push origin my-new-feature
-
-In git >= 1.7 you can ensure that the link is correctly set by using the
-``--set-upstream`` option::
-
-   git push --set-upstream origin my-new-feature
-
-From now on git will know that ``my-new-feature`` is related to the
-``my-new-feature`` branch in the github repo.
-
-.. _edit-flow:
-
-The editing workflow
-====================
-
-Overview
---------
-
-::
-
-   # hack hack
-   git add my_new_file
-   git commit -am 'NF - some message'
-   git push
-
-In more detail
---------------
-
-#. Make some changes
-#. See which files have changed with ``git status`` (see `git status`_).
-   You'll see a listing like this one::
-
-     # On branch my-new-feature
-     # Changed but not updated:
-     #   (use "git add <file>..." to update what will be committed)
-     #   (use "git checkout -- <file>..." to discard changes in working directory)
-     #
-     #  modified:   README
-     #
-     # Untracked files:
-     #   (use "git add <file>..." to include in what will be committed)
-     #
-     #  INSTALL
-     no changes added to commit (use "git add" and/or "git commit -a")
-
-#. Check what the actual changes are with ``git diff`` (`git diff`_).
-#. Add any new files to version control ``git add new_file_name`` (see
-   `git add`_).
-#. To commit all modified files into the local copy of your repo, do
-   ``git commit -am 'A commit message'``. Note the ``-am`` options to
-   ``commit``. The ``m`` flag just signals that you're going to type a
-   message on the command line. The ``a`` flag |emdash| you can just take on
-   faith |emdash| or see `why the -a flag?`_ |emdash| and the helpful use-case
-   description in the `tangled working copy problem`_. The `git commit`_ manual
-   page might also be useful.
-#. To push the changes up to your forked repo on github, do a ``git
-   push`` (see `git push`_).
-
-Testing your changes
-====================
-
-Once you are happy with your changes, work through the :ref:`pr_check` and make sure
-your branch passes all the relevant tests.
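For example, a single test module can be run on its own with the standard
``unittest`` machinery. This is a minimal sketch, assuming an importable Iris
installation; the module name used here is just an example:

.. code-block:: python

    # Minimal sketch: run one Iris unit-test module via unittest,
    # assuming Iris is importable (the module name is an example only).
    import unittest

    suite = unittest.defaultTestLoader.loadTestsFromName(
        "iris.tests.unit.cube.test_Cube")
    unittest.TextTestRunner(verbosity=2).run(suite)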
-
-Ask for your changes to be reviewed or merged
-=============================================
-
-When you are ready to ask for someone to review your code and consider a merge:
-
-#. Go to the URL of your forked repo, say
-   ``http://github.com/your-user-name/iris``.
-#. Use the 'Switch Branches' dropdown menu near the top left of the page to
-   select the branch with your changes:
-
-   .. image:: branch_dropdown.png
-
-#. Click on the 'Pull request' button:
-
-   .. image:: pull_button.png
-
-   Enter a title for the set of changes, and some explanation of what you've
-   done. Say if there is anything you'd like particular attention for - like a
-   complicated change or some code you are not happy with.
-
-   If you don't think your request is ready to be merged, just say so in your
-   pull request message. This is still a good way of getting some preliminary
-   code review.
-
-Some other things you might want to do
-======================================
-
-Delete a branch on github
--------------------------
-
-::
-
-   git checkout master
-   # delete branch locally
-   git branch -D my-unwanted-branch
-   # delete branch on github
-   git push origin :my-unwanted-branch
-
-(Note the colon ``:`` before ``my-unwanted-branch``. See also:
-http://github.com/guides/remove-a-remote-branch)
-
-Several people sharing a single repository
-------------------------------------------
-
-If you want to work on some stuff with other people, where you are all
-committing into the same repository, or even the same branch, then just
-share it via github.
-
-First fork iris into your account, as described in :ref:`forking`.
-
-Then, go to your forked repository github page, say
-``http://github.com/your-user-name/iris``
-
-Click on the 'Admin' button, and add anyone else to the repo as a
-collaborator:
-
-   .. image:: pull_button.png
-
-Now all those people can do::
-
-   git clone git@github.com:your-user-name/iris.git
-
-Remember that links starting with ``git@`` use the ssh protocol and are
-read-write; links starting with ``git://`` are read-only.
-
-Your collaborators can then commit directly into that repo with the
-usual::
-
-   git commit -am 'ENH - much better code'
-   git push origin master # pushes directly into your repo
-
-Explore your repository
------------------------
-
-To see a graphical representation of the repository branches and
-commits::
-
-   gitk --all
-
-To see a linear list of commits for this branch::
-
-   git log
-
-You can also look at the `network graph visualizer`_ for your github
-repo.
-
-Finally the :ref:`fancy-log` ``lg`` alias will give you a reasonable text-based
-graph of the repository.
-
-.. _rebase-on-trunk:
-
-Rebasing on trunk
------------------
-
-Let's say you thought of some work you'd like to do. You
-:ref:`update-mirror-trunk` and :ref:`make-feature-branch` called
-``cool-feature``. At this stage trunk is at some commit, let's call it E. Now
-you make some new commits on your ``cool-feature`` branch, let's call them A, B,
-C. Maybe your changes take a while, or you come back to them after a while. In
-the meantime, trunk has progressed from commit E to commit (say) G::
-
-    A---B---C cool-feature
-   /
-  D---E---F---G trunk
-
-At this stage you consider merging trunk into your feature branch, and you
-remember that this here page sternly advises you not to do that, because the
-history will get messy. Most of the time you can just ask for a review, and not
-worry that trunk has got a little ahead. But sometimes, the changes in trunk
-might affect your changes, and you need to harmonize them.
In this situation
-you may prefer to do a rebase.
-
-rebase takes your changes (A, B, C) and replays them as if they had been made to
-the current state of ``trunk``. In other words, in this case, it takes the
-changes represented by A, B, C and replays them on top of G. After the rebase,
-your history will look like this::
-
-    A'--B'--C' cool-feature
-   /
-  D---E---F---G trunk
-
-See `rebase without tears`_ for more detail.
-
-To do a rebase on trunk::
-
-   # Update the mirror of trunk
-   git fetch upstream
-   # go to the feature branch
-   git checkout cool-feature
-   # make a backup in case you mess up
-   git branch tmp cool-feature
-   # rebase cool-feature onto trunk
-   git rebase --onto upstream/master upstream/master cool-feature
-
-In this situation, where you are already on branch ``cool-feature``, the last
-command can be written more succinctly as::
-
-   git rebase upstream/master
-
-When all looks good you can delete your backup branch::
-
-   git branch -D tmp
-
-If it doesn't look good you may need to have a look at
-:ref:`recovering-from-mess-up`.
-
-If you have made changes to files that have also changed in trunk, this may
-generate merge conflicts that you need to resolve - see the `git rebase`_ man
-page for some instructions at the end of the "Description" section. There is
-some related help on merging in the git user manual - see `resolving a merge`_.
-
-.. _recovering-from-mess-up:
-
-Recovering from mess-ups
-------------------------
-
-Sometimes, you mess up merges or rebases. Luckily, in git it is
-relatively straightforward to recover from such mistakes.
-
-If you mess up during a rebase::
-
-   git rebase --abort
-
-If you notice you messed up after the rebase::
-
-   # reset branch back to the saved point
-   git reset --hard tmp
-
-If you forgot to make a backup branch::
-
-   # look at the reflog of the branch
-   git reflog show cool-feature
-
-   8630830 cool-feature@{0}: commit: BUG: io: close file handles immediately
-   278dd2a cool-feature@{1}: rebase finished: refs/heads/my-feature-branch onto 11ee694744f2552d
-   26aa21a cool-feature@{2}: commit: BUG: lib: make seek_gzip_factory not leak gzip obj
-   ...
-
-   # reset the branch to where it was before the botched rebase
-   git reset --hard cool-feature@{2}
-
-.. _rewriting-commit-history:
-
-Rewriting commit history
-------------------------
-
-.. note::
-
-   Do this only for your own feature branches.
-
-There's an embarrassing typo in a commit you made? Or perhaps you
-made several false starts that you would like posterity not to see.
-
-This can be done via *interactive rebasing*.
-
-Suppose that the commit history looks like this::
-
-   git log --oneline
-   eadc391 Fix some remaining bugs
-   a815645 Modify it so that it works
-   2dec1ac Fix a few bugs + disable
-   13d7934 First implementation
-   6ad92e5 * masked is now an instance of a new object, MaskedConstant
-   29001ed Add pre-nep for a couple of structured_array_extensions.
-   ...
-
-and ``6ad92e5`` is the last commit in the ``cool-feature`` branch. Suppose we
-want to make the following changes:
-
-* Rewrite the commit message for ``13d7934`` to something more sensible.
-* Combine the commits ``2dec1ac``, ``a815645``, ``eadc391`` into a single one.
-
-We do as follows::
-
-   # make a backup of the current state
-   git branch tmp HEAD
-   # interactive rebase
-   git rebase -i 6ad92e5
-
-This will open an editor with the following text in it::
-
-   pick 13d7934 First implementation
-   pick 2dec1ac Fix a few bugs + disable
-   pick a815645 Modify it so that it works
-   pick eadc391 Fix some remaining bugs
-
-   # Rebase 6ad92e5..eadc391 onto 6ad92e5
-   #
-   # Commands:
-   #  p, pick = use commit
-   #  r, reword = use commit, but edit the commit message
-   #  e, edit = use commit, but stop for amending
-   #  s, squash = use commit, but meld into previous commit
-   #  f, fixup = like "squash", but discard this commit's log message
-   #
-   # If you remove a line here THAT COMMIT WILL BE LOST.
-   # However, if you remove everything, the rebase will be aborted.
-   #
-
-To achieve what we want, we will make the following changes to it::
-
-   r 13d7934 First implementation
-   pick 2dec1ac Fix a few bugs + disable
-   f a815645 Modify it so that it works
-   f eadc391 Fix some remaining bugs
-
-This means that (i) we want to edit the commit message for
-``13d7934``, and (ii) collapse the last three commits into one. Now we
-save and quit the editor.
-
-Git will then immediately bring up an editor for editing the commit
-message. After revising it, we get the output::
-
-   [detached HEAD 721fc64] FOO: First implementation
-    2 files changed, 199 insertions(+), 66 deletions(-)
-   [detached HEAD 0f22701] Fix a few bugs + disable
-    1 files changed, 79 insertions(+), 61 deletions(-)
-   Successfully rebased and updated refs/heads/my-feature-branch.
-
-and the history now looks like this::
-
-   0f22701 Fix a few bugs + disable
-   721fc64 ENH: Sophisticated feature
-   6ad92e5 * masked is now an instance of a new object, MaskedConstant
-
-If it went wrong, recovery is again possible as explained :ref:`above
-<recovering-from-mess-up>`.
-
-.. include:: links.inc
diff --git a/docs/iris/src/developers_guide/gitwash/forking_button.png b/docs/iris/src/developers_guide/gitwash/forking_button.png
deleted file mode 100644
index d0e04134d4..0000000000
Binary files a/docs/iris/src/developers_guide/gitwash/forking_button.png and /dev/null differ
diff --git a/docs/iris/src/developers_guide/gitwash/forking_hell.rst b/docs/iris/src/developers_guide/gitwash/forking_hell.rst
deleted file mode 100644
index 2b38c02736..0000000000
--- a/docs/iris/src/developers_guide/gitwash/forking_hell.rst
+++ /dev/null
@@ -1,33 +0,0 @@
-.. _forking:
-
-======================================================
-Making your own copy (fork) of iris
-======================================================
-
-You need to do this only once. The instructions here are very similar
-to the instructions at http://help.github.com/forking/ |emdash| please see
-that page for more detail. We're repeating some of it here just to give the
-specifics for the `iris`_ project, and to suggest some default names.
-
-Set up and configure a github account
-=====================================
-
-If you don't have a github account, go to the github page, and make one.
-
-You then need to configure your account to allow write access |emdash| see
-the ``Generating SSH keys`` help on `github help`_.
-
-Create your own forked copy of `iris`_
-======================================================
-
-#. Log into your github account.
-#. Go to the `iris`_ github home at `iris github`_.
-#. Click on the *fork* button:
-
-   ..
image:: forking_button.png - - Now, after a short pause and some 'Hardcore forking action', you - should find yourself at the home page for your own forked copy of `iris`_. - -.. include:: links.inc - diff --git a/docs/iris/src/developers_guide/gitwash/git_development.rst b/docs/iris/src/developers_guide/gitwash/git_development.rst deleted file mode 100644 index c5b910d863..0000000000 --- a/docs/iris/src/developers_guide/gitwash/git_development.rst +++ /dev/null @@ -1,16 +0,0 @@ -.. _git-development: - -===================== - Git for development -===================== - -Contents: - -.. toctree:: - :maxdepth: 2 - - forking_hell - set_up_fork - configure_git - development_workflow - maintainer_workflow diff --git a/docs/iris/src/developers_guide/gitwash/git_install.rst b/docs/iris/src/developers_guide/gitwash/git_install.rst deleted file mode 100644 index 3be5149b90..0000000000 --- a/docs/iris/src/developers_guide/gitwash/git_install.rst +++ /dev/null @@ -1,26 +0,0 @@ -.. _install-git: - -============= - Install git -============= - -Overview -======== - -================ ============= -Debian / Ubuntu ``sudo apt-get install git`` -Fedora ``sudo yum install git`` -Windows Download and install msysGit_ -OS X Use the git-osx-installer_ -================ ============= - -In detail -========= - -See the git page for the most recent information. - -Have a look at the github install help pages available from `github help`_ - -There are good instructions here: http://book.git-scm.com/2_installing_git.html - -.. include:: links.inc diff --git a/docs/iris/src/developers_guide/gitwash/git_intro.rst b/docs/iris/src/developers_guide/gitwash/git_intro.rst deleted file mode 100644 index 486e1c6c08..0000000000 --- a/docs/iris/src/developers_guide/gitwash/git_intro.rst +++ /dev/null @@ -1,18 +0,0 @@ -============== - Introduction -============== - -These pages describe a git_ and github_ workflow for the `iris`_ -project. - -There are several different workflows here, for different ways of -working with *iris*. - -This is not a comprehensive git reference, it's just a workflow for our -own project. It's tailored to the github hosting service. You may well -find better or quicker ways of getting stuff done with git, but these -should get you started. - -For general resources for learning git, see :ref:`git-resources`. - -.. include:: links.inc diff --git a/docs/iris/src/developers_guide/gitwash/git_links.inc b/docs/iris/src/developers_guide/gitwash/git_links.inc deleted file mode 100644 index 8e628ae19e..0000000000 --- a/docs/iris/src/developers_guide/gitwash/git_links.inc +++ /dev/null @@ -1,61 +0,0 @@ -.. This (-*- rst -*-) format file contains commonly used link targets - and name substitutions. It may be included in many files, - therefore it should only contain link targets and name - substitutions. Try grepping for "^\.\. _" to find plausible - candidates for this list. - -.. NOTE: reST targets are - __not_case_sensitive__, so only one target definition is needed for - nipy, NIPY, Nipy, etc... - -.. git stuff -.. _git: http://git-scm.com/ -.. _github: http://github.com -.. _github help: http://help.github.com -.. _msysgit: http://code.google.com/p/msysgit/downloads/list -.. _git-osx-installer: http://code.google.com/p/git-osx-installer/downloads/list -.. _subversion: http://subversion.tigris.org/ -.. _git cheat sheet: http://github.com/guides/git-cheat-sheet -.. _pro git book: http://progit.org/ -.. _git svn crash course: http://git-scm.com/course/svn.html -.. 
_learn.github: http://learn.github.com/ -.. _network graph visualizer: http://github.com/blog/39-say-hello-to-the-network-graph-visualizer -.. _git user manual: http://schacon.github.com/git/user-manual.html -.. _git tutorial: http://schacon.github.com/git/gittutorial.html -.. _git community book: http://book.git-scm.com/ -.. _git ready: http://www.gitready.com/ -.. _git casts: http://www.gitcasts.com/ -.. _Fernando's git page: http://www.fperez.org/py4science/git.html -.. _git magic: http://www-cs-students.stanford.edu/~blynn/gitmagic/index.html -.. _git concepts: http://www.eecs.harvard.edu/~cduan/technical/git/ -.. _git clone: http://schacon.github.com/git/git-clone.html -.. _git checkout: http://schacon.github.com/git/git-checkout.html -.. _git commit: http://schacon.github.com/git/git-commit.html -.. _git push: http://schacon.github.com/git/git-push.html -.. _git pull: http://schacon.github.com/git/git-pull.html -.. _git add: http://schacon.github.com/git/git-add.html -.. _git status: http://schacon.github.com/git/git-status.html -.. _git diff: http://schacon.github.com/git/git-diff.html -.. _git log: http://schacon.github.com/git/git-log.html -.. _git branch: http://schacon.github.com/git/git-branch.html -.. _git remote: http://schacon.github.com/git/git-remote.html -.. _git rebase: http://schacon.github.com/git/git-rebase.html -.. _git config: http://schacon.github.com/git/git-config.html -.. _why the -a flag?: http://www.gitready.com/beginner/2009/01/18/the-staging-area.html -.. _git staging area: http://www.gitready.com/beginner/2009/01/18/the-staging-area.html -.. _tangled working copy problem: http://tomayko.com/writings/the-thing-about-git -.. _git management: http://kerneltrap.org/Linux/Git_Management -.. _linux git workflow: http://www.mail-archive.com/dri-devel@lists.sourceforge.net/msg39091.html -.. _git parable: http://tom.preston-werner.com/2009/05/19/the-git-parable.html -.. _git foundation: http://matthew-brett.github.com/pydagogue/foundation.html -.. _deleting master on github: http://matthew-brett.github.com/pydagogue/gh_delete_master.html -.. _rebase without tears: http://matthew-brett.github.com/pydagogue/rebase_without_tears.html -.. _resolving a merge: http://schacon.github.com/git/user-manual.html#resolving-a-merge -.. _ipython git workflow: http://mail.scipy.org/pipermail/ipython-dev/2010-October/006746.html - -.. other stuff -.. _python: http://www.python.org - -.. |emdash| unicode:: U+02014 - -.. vim: ft=rst diff --git a/docs/iris/src/developers_guide/gitwash/git_resources.rst b/docs/iris/src/developers_guide/gitwash/git_resources.rst deleted file mode 100644 index d18b0ef48b..0000000000 --- a/docs/iris/src/developers_guide/gitwash/git_resources.rst +++ /dev/null @@ -1,59 +0,0 @@ -.. _git-resources: - -============= -git resources -============= - -Tutorials and summaries -======================= - -* `github help`_ has an excellent series of how-to guides. -* `learn.github`_ has an excellent series of tutorials -* The `pro git book`_ is a good in-depth book on git. -* A `git cheat sheet`_ is a page giving summaries of common commands. -* The `git user manual`_ -* The `git tutorial`_ -* The `git community book`_ -* `git ready`_ |emdash| a nice series of tutorials -* `git casts`_ |emdash| video snippets giving git how-tos. -* `git magic`_ |emdash| extended introduction with intermediate detail -* The `git parable`_ is an easy read explaining the concepts behind git. -* `git foundation`_ expands on the `git parable`_. 
-* Fernando Perez' git page |emdash| `Fernando's git page`_ |emdash| many
-  links and tips
-* A good but technical page on `git concepts`_
-* `git svn crash course`_: git for those of us used to subversion_
-
-Advanced git workflow
-=====================
-
-There are many ways of working with git; here are some posts on the
-rules of thumb that other projects have come up with:
-
-* Linus Torvalds on `git management`_
-* Linus Torvalds on `linux git workflow`_. Summary: use the git tools
-  to make the history of your edits as clean as possible; merge from
-  upstream edits as little as possible in branches where you are doing
-  active development.
-
-Manual pages online
-===================
-
-You can get these on your own machine with (e.g.) ``git help push`` or
-(same thing) ``git push --help``, but, for convenience, here are the
-online manual pages for some common commands:
-
-* `git add`_
-* `git branch`_
-* `git checkout`_
-* `git clone`_
-* `git commit`_
-* `git config`_
-* `git diff`_
-* `git log`_
-* `git pull`_
-* `git push`_
-* `git remote`_
-* `git status`_
-
-.. include:: links.inc
diff --git a/docs/iris/src/developers_guide/gitwash/index.rst b/docs/iris/src/developers_guide/gitwash/index.rst
deleted file mode 100644
index 35eee1944a..0000000000
--- a/docs/iris/src/developers_guide/gitwash/index.rst
+++ /dev/null
@@ -1,16 +0,0 @@
-.. _using-git:
-
-Working with *iris* source code
-================================================
-
-Contents:
-
-.. toctree::
-   :maxdepth: 2
-
-   git_intro
-   git_install
-   git_development
-   git_resources
-
-
diff --git a/docs/iris/src/developers_guide/gitwash/known_projects.inc b/docs/iris/src/developers_guide/gitwash/known_projects.inc
deleted file mode 100644
index 1761d975aa..0000000000
--- a/docs/iris/src/developers_guide/gitwash/known_projects.inc
+++ /dev/null
@@ -1,41 +0,0 @@
-.. Known projects
-
-.. PROJECTNAME placeholders
-.. _PROJECTNAME: http://nipy.org
-.. _`PROJECTNAME github`: https://github.com/nipy
-.. _`PROJECTNAME mailing list`: https://mail.python.org/mailman/listinfo/neuroimaging
-
-.. numpy
-.. _numpy: http://www.numpy.org
-.. _`numpy github`: https://github.com/numpy/numpy
-.. _`numpy mailing list`: http://mail.scipy.org/mailman/listinfo/numpy-discussion
-
-.. scipy
-.. _scipy: https://www.scipy.org
-.. _`scipy github`: https://github.com/scipy/scipy
-.. _`scipy mailing list`: http://mail.scipy.org/mailman/listinfo/scipy-dev
-
-.. nipy
-.. _nipy: http://nipy.org/nipy
-.. _`nipy github`: https://github.com/nipy/nipy
-.. _`nipy mailing list`: https://mail.python.org/mailman/listinfo/neuroimaging
-
-.. ipython
-.. _ipython: https://ipython.org
-.. _`ipython github`: https://github.com/ipython/ipython
-.. _`ipython mailing list`: http://mail.scipy.org/mailman/listinfo/IPython-dev
-
-.. dipy
-.. _dipy: http://nipy.org/dipy
-.. _`dipy github`: https://github.com/Garyfallidis/dipy
-.. _`dipy mailing list`: https://mail.python.org/mailman/listinfo/neuroimaging
-
-.. nibabel
-.. _nibabel: http://nipy.org/nibabel
-.. _`nibabel github`: https://github.com/nipy/nibabel
-.. _`nibabel mailing list`: https://mail.python.org/mailman/listinfo/neuroimaging
-
-.. marsbar
-.. _marsbar: http://marsbar.sourceforge.net
-.. _`marsbar github`: https://github.com/matthew-brett/marsbar
-..
_`MarsBaR mailing list`: https://lists.sourceforge.net/lists/listinfo/marsbar-users
diff --git a/docs/iris/src/developers_guide/gitwash/links.inc b/docs/iris/src/developers_guide/gitwash/links.inc
deleted file mode 100644
index 20f4dcfffd..0000000000
--- a/docs/iris/src/developers_guide/gitwash/links.inc
+++ /dev/null
@@ -1,4 +0,0 @@
-.. compiling links file
-.. include:: known_projects.inc
-.. include:: this_project.inc
-.. include:: git_links.inc
diff --git a/docs/iris/src/developers_guide/gitwash/maintainer_workflow.rst b/docs/iris/src/developers_guide/gitwash/maintainer_workflow.rst
deleted file mode 100644
index b05be47611..0000000000
--- a/docs/iris/src/developers_guide/gitwash/maintainer_workflow.rst
+++ /dev/null
@@ -1,96 +0,0 @@
-.. _maintainer-workflow:
-
-###################
-Maintainer workflow
-###################
-
-This page is for maintainers |emdash| those of us who merge our own or other
-people's changes into the upstream repository.
-
-Being as how you're a maintainer, you are completely on top of the basic stuff
-in :ref:`development-workflow`.
-
-The instructions in :ref:`linking-to-upstream` add a remote that has read-only
-access to the upstream repo. Being a maintainer, you've got read-write access.
-
-It's good to have your upstream remote have a scary name, to remind you that
-it's a read-write remote::
-
-   git remote add upstream-rw git@github.com:SciTools/iris.git
-   git fetch upstream-rw
-
-*******************
-Integrating changes
-*******************
-
-Let's say you have some changes that need to go into trunk
-(``upstream-rw/master``).
-
-The changes are in some branch that you are currently on. For example, you are
-looking at someone's changes like this::
-
-   git remote add someone git://github.com/someone/iris.git
-   git fetch someone
-   git branch cool-feature --track someone/cool-feature
-   git checkout cool-feature
-
-So now you are on the branch with the changes to be incorporated upstream. The
-rest of this section assumes you are on this branch.
-
-A few commits
-=============
-
-If there are only a few commits, consider rebasing to upstream::
-
-   # Fetch upstream changes
-   git fetch upstream-rw
-   # rebase
-   git rebase upstream-rw/master
-
-Remember that, if you do a rebase, and push that, you'll have to close any
-github pull requests manually, because github will not be able to detect the
-changes have already been merged.
-
-A long series of commits
-========================
-
-If there are a longer series of related commits, consider a merge instead::
-
-   git fetch upstream-rw
-   git merge --no-ff upstream-rw/master
-
-The merge will be detected by github, and should close any related pull requests
-automatically.
-
-Note the ``--no-ff`` above. This forces git to make a merge commit, rather than
-doing a fast-forward, so that this set of commits branches off trunk and then
-rejoins the main history with a merge, rather than appearing to have been made
-directly on top of trunk.
-
-Check the history
-=================
-
-Now, in either case, you should check that the history is sensible and you have
-the right commits::
-
-   git log --oneline --graph
-   git log -p upstream-rw/master..
-
-The first line above just shows the history in a compact way, with a text
-representation of the history graph. The second line shows the log of commits
-excluding those that can be reached from trunk (``upstream-rw/master``), and
-including those that can be reached from current HEAD (implied with the ``..``
-at the end).
So, it shows the commits unique to this branch compared to trunk.
-The ``-p`` option shows the diff for these commits in patch form.
-
-Push to trunk
-=============
-
-::
-
-   git push upstream-rw my-new-feature:master
-
-This pushes the ``my-new-feature`` branch in this repository to the ``master``
-branch in the ``upstream-rw`` repository.
-
-.. include:: links.inc
diff --git a/docs/iris/src/developers_guide/gitwash/pull_button.png b/docs/iris/src/developers_guide/gitwash/pull_button.png
deleted file mode 100644
index e5031681b9..0000000000
Binary files a/docs/iris/src/developers_guide/gitwash/pull_button.png and /dev/null differ
diff --git a/docs/iris/src/developers_guide/gitwash/set_up_fork.rst b/docs/iris/src/developers_guide/gitwash/set_up_fork.rst
deleted file mode 100644
index 172cbb2051..0000000000
--- a/docs/iris/src/developers_guide/gitwash/set_up_fork.rst
+++ /dev/null
@@ -1,68 +0,0 @@
-.. _set-up-fork:
-
-==================
- Set up your fork
-==================
-
-First you follow the instructions for :ref:`forking`.
-
-Overview
-========
-
-::
-
-   git clone git@github.com:your-user-name/iris.git
-   cd iris
-   git remote add upstream git://github.com/SciTools/iris.git
-
-In detail
-=========
-
-Clone your fork
----------------
-
-#. Clone your fork to the local computer with ``git clone
-   git@github.com:your-user-name/iris.git``
-#. Investigate. Change directory to your new repo: ``cd iris``. Then
-   ``git branch -a`` to show you all branches. You'll get something
-   like::
-
-      * master
-      remotes/origin/master
-
-   This tells you that you are currently on the ``master`` branch, and
-   that you also have a ``remote`` connection to ``origin/master``.
-   What remote repository is ``remote/origin``? Try ``git remote -v`` to
-   see the URLs for the remote. They will point to your github fork.
-
-   Now you want to connect to the upstream `iris github`_ repository, so
-   you can merge in changes from trunk.
-
-.. _linking-to-upstream:
-
-Linking your repository to the upstream repo
---------------------------------------------
-
-::
-
-   cd iris
-   git remote add upstream git://github.com/SciTools/iris.git
-
-``upstream`` here is just the arbitrary name we're using to refer to the
-main `iris`_ repository at `iris github`_.
-
-Note that we've used ``git://`` for the URL rather than ``git@``. The
-``git://`` URL is read only. This means that we can't accidentally
-(or deliberately) write to the upstream repo, and we are only going to
-use it to merge into our own code.
-
-Just for your own satisfaction, show yourself that you now have a new
-'remote', with ``git remote -v show``, giving you something like::
-
-   upstream git://github.com/SciTools/iris.git (fetch)
-   upstream git://github.com/SciTools/iris.git (push)
-   origin git@github.com:your-user-name/iris.git (fetch)
-   origin git@github.com:your-user-name/iris.git (push)
-
-.. include:: links.inc
-
diff --git a/docs/iris/src/developers_guide/gitwash/this_project.inc b/docs/iris/src/developers_guide/gitwash/this_project.inc
deleted file mode 100644
index 38219bf4b4..0000000000
--- a/docs/iris/src/developers_guide/gitwash/this_project.inc
+++ /dev/null
@@ -1,5 +0,0 @@
-.. iris
-.. _`iris`: http://scitools.org.uk/iris
-.. _`iris github`: http://github.com/SciTools/iris
-
-..
_`iris mailing list`: https://groups.google.com/forum/#!forum/scitools-iris diff --git a/docs/iris/src/developers_guide/gitwash_build.sh b/docs/iris/src/developers_guide/gitwash_build.sh deleted file mode 100755 index e1c4cdb2af..0000000000 --- a/docs/iris/src/developers_guide/gitwash_build.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/usr/bin/env sh - - -# Generate the gitwash sub-directory. -echo -echo "Building gitwash ..." -echo -python gitwash_dumper.py --repo-name=iris --github-user=SciTools --gitwash-url=https://github.com/matthew-brett/gitwash.git --project-url=http://scitools.org.uk/iris --project-ml-url=https://groups.google.com/forum/#!forum/scitools-iris ./ iris diff --git a/docs/iris/src/developers_guide/gitwash_dumper.py b/docs/iris/src/developers_guide/gitwash_dumper.py deleted file mode 100644 index 999a738fa6..0000000000 --- a/docs/iris/src/developers_guide/gitwash_dumper.py +++ /dev/null @@ -1,236 +0,0 @@ -#!/usr/bin/env python -''' Checkout gitwash repo into directory and do search replace on name ''' - -from __future__ import (absolute_import, division, print_function) -from six.moves import (filter, input, map, range, zip) # noqa - -import os -from os.path import join as pjoin -import shutil -import sys -import re -import glob -import fnmatch -import tempfile -from subprocess import call -from optparse import OptionParser - -verbose = False - - -def clone_repo(url, branch): - cwd = os.getcwd() - tmpdir = tempfile.mkdtemp() - try: - cmd = 'git clone %s %s' % (url, tmpdir) - call(cmd, shell=True) - os.chdir(tmpdir) - cmd = 'git checkout %s' % branch - call(cmd, shell=True) - except: - shutil.rmtree(tmpdir) - raise - finally: - os.chdir(cwd) - return tmpdir - - -def cp_files(in_path, globs, out_path): - try: - os.makedirs(out_path) - except OSError: - pass - out_fnames = [] - for in_glob in globs: - in_glob_path = pjoin(in_path, in_glob) - for in_fname in glob.glob(in_glob_path): - out_fname = in_fname.replace(in_path, out_path) - pth, _ = os.path.split(out_fname) - if not os.path.isdir(pth): - os.makedirs(pth) - shutil.copyfile(in_fname, out_fname) - out_fnames.append(out_fname) - return out_fnames - - -def filename_search_replace(sr_pairs, filename, backup=False): - ''' Search and replace for expressions in files - - ''' - with open(filename, 'rt') as in_fh: - in_txt = in_fh.read(-1) - out_txt = in_txt[:] - for in_exp, out_exp in sr_pairs: - in_exp = re.compile(in_exp) - out_txt = in_exp.sub(out_exp, out_txt) - if in_txt == out_txt: - return False - with open(filename, 'wt') as out_fh: - out_fh.write(out_txt) - if backup: - with open(filename + '.bak', 'wt') as bak_fh: - bak_fh.write(in_txt) - return True - - -def copy_replace(replace_pairs, - repo_path, - out_path, - cp_globs=('*',), - rep_globs=('*',), - renames = ()): - out_fnames = cp_files(repo_path, cp_globs, out_path) - renames = [(re.compile(in_exp), out_exp) for in_exp, out_exp in renames] - fnames = [] - for rep_glob in rep_globs: - fnames += fnmatch.filter(out_fnames, rep_glob) - if verbose: - print('\n'.join(fnames)) - for fname in fnames: - filename_search_replace(replace_pairs, fname, False) - for in_exp, out_exp in renames: - new_fname, n = in_exp.subn(out_exp, fname) - if n: - os.rename(fname, new_fname) - break - - -def make_link_targets(proj_name, - user_name, - repo_name, - known_link_fname, - out_link_fname, - url=None, - ml_url=None): - """ Check and make link targets - - If url is None or ml_url is None, check if there are links present for these - in `known_link_fname`. If not, raise error. 
The check is:
    - Look for a target `proj_name`.
    - Look for a target `proj_name` + ' mailing list'

    Also, look for a target `proj_name` + 'github'. If this exists, don't write
    this target into the new file below.

    If we are writing any of the url, ml_url, or github address, then write new
    file with these links, of form:

    .. _`proj_name`
    .. _`proj_name`: url
    .. _`proj_name` mailing list: url
    """
    with open(known_link_fname, 'rt') as link_fh:
        link_contents = link_fh.readlines()
    have_url = not url is None
    have_ml_url = not ml_url is None
    have_gh_url = None
    for line in link_contents:
        if not have_url:
            match = re.match(r'..\s+_`%s`:\s+' % proj_name, line)
            if match:
                have_url = True
        if not have_ml_url:
            match = re.match(r'..\s+_`%s mailing list`:\s+' % proj_name, line)
            if match:
                have_ml_url = True
        if not have_gh_url:
            match = re.match(r'..\s+_`%s github`:\s+' % proj_name, line)
            if match:
                have_gh_url = True
    if not have_url or not have_ml_url:
        raise RuntimeError('Need command line or known project '
                           'and / or mailing list URLs')
    lines = []
    if not url is None:
        lines.append('.. _`%s`: %s\n' % (proj_name, url))
    if not have_gh_url:
        gh_url = 'http://github.com/%s/%s\n' % (user_name, repo_name)
        lines.append('.. _`%s github`: %s\n' % (proj_name, gh_url))
    if not ml_url is None:
        lines.append('.. _`%s mailing list`: %s\n' % (proj_name, ml_url))
    if len(lines) == 0:
        # Nothing to do
        return
    # A neat little header line
    lines = ['.. %s\n' % proj_name] + lines
    with open(out_link_fname, 'wt') as out_links:
        out_links.writelines(lines)


USAGE = ''' <output_directory> <project_name>

If not set with options, the repository name is the same as the <project
name>

If not set with options, the main github user is the same as the
repository name.'''


GITWASH_CENTRAL = 'git://github.com/matthew-brett/gitwash.git'
GITWASH_BRANCH = 'master'


def main():
    parser = OptionParser()
    parser.set_usage(parser.get_usage().strip() + USAGE)
    parser.add_option("--repo-name", dest="repo_name",
                      help="repository name - e.g.
nitime", - metavar="REPO_NAME") - parser.add_option("--github-user", dest="main_gh_user", - help="github username for main repo - e.g fperez", - metavar="MAIN_GH_USER") - parser.add_option("--gitwash-url", dest="gitwash_url", - help="URL to gitwash repository - default %s" - % GITWASH_CENTRAL, - default=GITWASH_CENTRAL, - metavar="GITWASH_URL") - parser.add_option("--gitwash-branch", dest="gitwash_branch", - help="branch in gitwash repository - default %s" - % GITWASH_BRANCH, - default=GITWASH_BRANCH, - metavar="GITWASH_BRANCH") - parser.add_option("--source-suffix", dest="source_suffix", - help="suffix of ReST source files - default '.rst'", - default='.rst', - metavar="SOURCE_SUFFIX") - parser.add_option("--project-url", dest="project_url", - help="URL for project web pages", - default=None, - metavar="PROJECT_URL") - parser.add_option("--project-ml-url", dest="project_ml_url", - help="URL for project mailing list", - default=None, - metavar="PROJECT_ML_URL") - (options, args) = parser.parse_args() - if len(args) < 2: - parser.print_help() - sys.exit() - out_path, project_name = args - if options.repo_name is None: - options.repo_name = project_name - if options.main_gh_user is None: - options.main_gh_user = options.repo_name - repo_path = clone_repo(options.gitwash_url, options.gitwash_branch) - try: - copy_replace((('PROJECTNAME', project_name), - ('REPONAME', options.repo_name), - ('MAIN_GH_USER', options.main_gh_user)), - repo_path, - out_path, - cp_globs=(pjoin('gitwash', '*'),), - rep_globs=('*.rst',), - renames=(('\.rst$', options.source_suffix),)) - make_link_targets(project_name, - options.main_gh_user, - options.repo_name, - pjoin(out_path, 'gitwash', 'known_projects.inc'), - pjoin(out_path, 'gitwash', 'this_project.inc'), - options.project_url, - options.project_ml_url) - finally: - shutil.rmtree(repo_path) - - -if __name__ == '__main__': - main() diff --git a/docs/iris/src/developers_guide/gitwash_get.sh b/docs/iris/src/developers_guide/gitwash_get.sh deleted file mode 100755 index c61b406603..0000000000 --- a/docs/iris/src/developers_guide/gitwash_get.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/usr/bin/env sh - - -# Get the latest gitwash_dumper.py from GitHub. -echo "Downloading latest gitwash_dumper.py from GitHub ..." -echo -curl -O https://raw.github.com/matthew-brett/gitwash/master/gitwash_dumper.py diff --git a/docs/iris/src/developers_guide/graphics_tests.rst b/docs/iris/src/developers_guide/graphics_tests.rst deleted file mode 100644 index 684ccfa4ab..0000000000 --- a/docs/iris/src/developers_guide/graphics_tests.rst +++ /dev/null @@ -1,117 +0,0 @@ -.. _developer_graphics_tests: - -Graphics tests -************** - -The only practical way of testing plotting functionality is to check actual -output plots. -For this, a basic 'graphics test' assertion operation is provided in the method -:meth:`iris.tests.IrisTest.check_graphic` : This tests plotted output for a -match against a stored reference. -A "graphics test" is any test which employs this. - -At present (Iris version 1.10), such tests include the testing for modules -`iris.tests.test_plot` and `iris.tests.test_quickplot`, and also some other -'legacy' style tests (as described in :ref:`developer_tests`). -It is conceivable that new 'graphics tests' of this sort can still be added. 
-
-However, as graphics tests are inherently "integration" style rather than true
-unit tests, results can differ with the installed versions of dependent
-libraries (see below), so this is not recommended except where no alternative
-is practical.
-
-Testing actual plot results introduces some significant difficulties:
-
- * Graphics tests are inherently 'integration' style tests, so results will
-   often vary with the versions of key dependencies, i.e. the exact versions of
-   third-party modules which are installed: obviously, results will depend on
-   the matplotlib version, but they can also depend on numpy and other
-   installed packages.
- * Although it seems possible in principle to accommodate 'small' result changes
-   by distinguishing plots which are 'nearly the same' from those which are
-   'significantly different', in practice no *automatic* scheme for this can be
-   perfect: that is, any calculated tolerance in output matching will allow
-   some changes which a human would judge as a significant error.
- * Storing a variety of alternative 'acceptable' results as reference images
-   can easily lead to uncontrolled increases in the size of the repository,
-   given multiple independent sources of variation.
-
-
-Graphics Testing Strategy
-=========================
-
-Prior to Iris 1.10, all graphics tests compared against a stored reference
-image with a small tolerance on pixel values.
-
-From Iris v1.11 onward, we want to support testing Iris against multiple
-versions of matplotlib (and some other dependencies).
-To make this manageable, we have now rewritten "check_graphic" to allow
-multiple alternative 'correct' results without including many more images in
-the Iris repository.
-This consists of:
-
- * using a perceptual 'image hash' of the outputs (see
-   https://github.com/JohannesBuchner/imagehash) as the basis for checking
-   test results.
- * storing the hashes of 'known accepted results' for each test in a
-   database in the repo (which is actually stored in
-   ``lib/iris/tests/results/imagerepo.json``).
- * storing associated reference images for each hash value in a separate public
-   repository, currently in https://github.com/SciTools/test-images-scitools ,
-   allowing human-eye judgement of 'valid equivalent' results.
- * a new version of the 'iris/tests/idiff.py' assists in comparing proposed
-   new 'correct' result images with the existing accepted ones.
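As a rough illustration of the hashing idea, perceptual hashes of two images
can be compared like this. This is a minimal sketch only, not the actual
``check_graphic`` implementation; it assumes the third-party ``imagehash``
and ``Pillow`` packages are installed, and the file names and tolerance are
examples:

.. code-block:: python

    # Minimal sketch of perceptual-hash comparison, assuming the
    # third-party 'imagehash' and 'Pillow' packages; this is NOT the
    # actual check_graphic code.
    from PIL import Image
    import imagehash

    expected = imagehash.phash(Image.open("reference.png"))
    actual = imagehash.phash(Image.open("result.png"))

    # Subtracting two hashes gives a Hamming distance: small distances
    # indicate perceptually similar images (tolerance is illustrative).
    if expected - actual > 2:
        raise AssertionError("Graphic does not match the known-good result.")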
-
-BRIEF...
-There should be sufficient work-flow detail here to allow an iris developer to:
-
- * understand the new check graphic test process
- * understand the steps to take and tools to use to add a new graphic test
- * understand the steps to take and tools to use to diagnose and fix a
-   graphics test failure
-
-
-Basic workflow
-==============
-
-If you notice that a graphics test in the Iris testing suite has failed
-following changes in Iris or any of its dependencies, this is the process
-you now need to follow:
-
-#. Create a directory in iris/lib/iris/tests called 'result_image_comparison'.
-#. From your Iris root directory, run the tests by using the command:
-   ``python setup.py test``.
-#. Navigate to iris/lib/iris/tests and run the command: ``python idiff.py``.
-   This will open a window for you to visually inspect the changes to the
-   graphic and then either accept or reject the new result.
-#. Upon acceptance of a change or a new image, a copy of the output PNG file
-   is added to the reference image repository in
-   https://github.com/SciTools/test-images-scitools. The file is named
-   according to the image hash value, as ``<hash>.png``.
-#. The hash value of the new result is added into the relevant set of 'valid
-   result hashes' in the image result database file,
-   ``tests/results/imagerepo.json``.
-#. The tests must now be re-run, and the 'new' result should be accepted.
-   Occasionally there are several graphics checks in a single test, only the
-   first of which will be run should it fail. If this is the case, then you
-   may well encounter further graphical test failures in your next runs, and
-   you must repeat the process until all the graphical tests pass.
-#. To add your changes to Iris, you need to make two pull requests. The first
-   should be made to the test-images-scitools repository, and this should
-   contain all the newly-generated png files copied into the folder named
-   'image_files'.
-#. The second pull request should be created in the Iris repository, and should
-   only include the change to the image results database
-   (``tests/results/imagerepo.json``):
-   this pull request must contain a reference to the matching one in
-   test-images-scitools.
-
-Note: the Iris pull-request will not test out successfully in Travis until the
-test-images-scitools pull request has been merged: this is because there is
-an Iris test which ensures the existence of the reference images (URIs) for all
-the targets in the image results database.
-
-
-Fixing a failing graphics test
-==============================
-
-
-Adding a new graphics test
-==========================
diff --git a/docs/iris/src/developers_guide/index.rst b/docs/iris/src/developers_guide/index.rst
deleted file mode 100644
index a1ecd0756f..0000000000
--- a/docs/iris/src/developers_guide/index.rst
+++ /dev/null
@@ -1,40 +0,0 @@
-..
-    ##########################################################################
-    (C) British Crown Copyright 2010 - 2012, Met Office
-
-    This file is part of Iris.
-
-    Iris is free software: you can redistribute it and/or modify it under
-    the terms of the GNU Lesser General Public License as published by the
-    Free Software Foundation, either version 3 of the License, or
-    (at your option) any later version.
-
-    Iris is distributed in the hope that it will be useful,
-    but WITHOUT ANY WARRANTY; without even the implied warranty of
-    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-    GNU Lesser General Public License for more details.
-
-    You should have received a copy of the GNU Lesser General Public License
-    along with Iris. If not, see <http://www.gnu.org/licenses/>.
-
-    ##########################################################################
-
-
-.. _userguide-index:
-
-.. This is the source doc for the user guide
-
-#####################
- Iris developer guide
-#####################
-
-
-.. toctree::
-   :maxdepth: 3
-
-   documenting/index.rst
-   gitwash/index.rst
-   pulls.rst
-   tests.rst
-   deprecations.rst
-   release.rst
diff --git a/docs/iris/src/developers_guide/pulls.rst b/docs/iris/src/developers_guide/pulls.rst
deleted file mode 100644
index 6546a15642..0000000000
--- a/docs/iris/src/developers_guide/pulls.rst
+++ /dev/null
@@ -1,116 +0,0 @@
-.. _pr_check:
-
-Pull Request Check List
-***********************
-
-A pull request to a SciTools project master should be ready to merge into the
-master branch.
-
-All pull requests will be reviewed by a core developer who will manage the
-process of merging. It is the responsibility of a developer submitting a
-pull request to do their best to deliver a pull request which meets the
-requirements of the project it is submitted to.
-
-The check list summarises criteria which will be checked before a pull request
-is merged. Before submitting a pull request please consider this list.
-
-
-The Iris Check List
-====================
-
-* Have you provided a helpful description of the Pull Request?
-  i.e. what has changed and why. This should include:
-
-  * the aim of the change; the problem addressed; a link to the issue.
-  * how the change has been delivered.
-  * a "What's New" entry, submitted as a new file added in the pull request.
-    See `Contributing a "What's New" entry`_.
-
-* Do all the tests pass locally?
-
-  * The Iris tests may be run with ``python setup.py test`` which has a command
-    line utility included.
-
-* Have new tests been provided for all additional functionality?
-
-* Do all modified and new sourcefiles pass PEP8?
-
-  * PEP8_ is the Python source code style guide.
-  * There is a python module for checking pep8 compliance: python-pep8_
-    (see the sketch following this check list).
-  * a standard Iris test checks that all sourcefiles meet PEP8 compliance
-    (see "iris.tests.test_coding_standards.TestCodeFormat").
-
-* Do all modified and new sourcefiles have a correct, up-to-date copyright
-  header?
-
-  * a standard Iris test checks that all sourcefiles include a copyright
-    message, including the correct year of the latest change
-    (see "iris.tests.test_coding_standards.TestLicenseHeaders").
-
-* Has the documentation been updated to explain all new or changed features?
-
-  * refer to the developer guide on docstrings_
-
-* Have code examples been provided inside docstrings, where relevant?
-
-  * these are strongly recommended as concrete (working) examples always
-    considerably enhance the documentation.
-
-  * live test code can be included in docstrings.
-
-    * See for example :data:`iris.cube.Cube.data`
-    * Details at http://www.sphinx-doc.org/en/stable/ext/doctest.html
-
-  * The documentation tests may be run with ``make doctest``, from within the
-    ``./docs/iris`` subdirectory.
-
-* Have you provided a 'whats new' contribution?
-
-  * this should be done for all changes that affect API or behaviour.
-    See :ref:`whats_new_contributions`
-
-* Does the documentation build without errors?
-
-  * The documentation is built using ``make html`` in ``./docs/iris``.
-
-* Do the documentation and code-example tests pass?
-
-  * Run with ``make doctest`` and ``make extest``, from within the subdirectory
-    ``./docs/iris``.
-  * note that code examples must *not* raise deprecations. This is now checked
-    and will result in an error.
    When an existing code example encounters a deprecation, it must be fixed.
-
-* Has the Travis file been updated to reflect any dependency updates?
-
-  * ``./.travis.yml`` is used to manage the continuous integration testing.
-  * the files ``./conda-requirements.yml`` and
-    ``./minimal-conda-requirements.yml`` are used to define the software
-    environments used, using the conda_ package manager.
-
-* Have you provided updates to supporting projects for test or example data?
-
-  * the following separate repos are used to manage larger files used by tests
-    and code examples:
-
-    * iris-test-data_ is a github project containing all the data to support the
-      tests.
-    * iris-sample-data_ is a github project containing all the data to support
-      the gallery and examples.
-    * test-images-scitools_ is a github project containing reference plot images
-      to support iris graphics tests: see :ref:`developer_graphics_tests`.
-
-  * If new files are required by tests or code examples, they must be added to
-    the appropriate supporting project via a suitable pull-request.
-    This new 'supporting pull request' should be referenced in the main Iris
-    pull request, and must be accepted and merged before the Iris one can be.
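For a quick local PEP8 check before submitting, the third-party ``pep8``
module can be driven from a few lines of Python. This is a minimal sketch
only; the file path is just an example:

.. code-block:: python

    # Minimal sketch: check one source file for PEP8 compliance with the
    # third-party 'pep8' module (the file path is illustrative only).
    import pep8

    style = pep8.StyleGuide()
    report = style.check_files(["lib/iris/cube.py"])
    if report.total_errors:
        print("%d PEP8 problem(s) found" % report.total_errors)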
-
-
-.. _PEP8: http://www.python.org/dev/peps/pep-0008/
-.. _python-pep8: https://pypi.python.org/pypi/pep8
-.. _conda: http://conda.readthedocs.io/en/latest/
-.. _iris-test-data: https://github.com/SciTools/iris-test-data
-.. _iris-sample-data: https://github.com/SciTools/iris-sample-data
-.. _test-images-scitools: https://github.com/SciTools/test-images-scitools
-.. _docstrings: http://scitools.org.uk/iris/docs/latest/developers_guide/documenting/docstrings.html
-.. _Contributing a "What's New" entry: http://scitools.org.uk/iris/docs/latest/developers_guide/documenting/whats_new_contributions.html
diff --git a/docs/iris/src/developers_guide/release.rst b/docs/iris/src/developers_guide/release.rst
deleted file mode 100644
index 437478a6a0..0000000000
--- a/docs/iris/src/developers_guide/release.rst
+++ /dev/null
@@ -1,75 +0,0 @@
-.. _iris_development_releases:
-
-Releases
-********
-
-A release of Iris is a tag on the SciTools/Iris Github repository.
-
-Release Branch
-==============
-
-Once the features intended for the release are on master, a release branch should be created, in the SciTools/Iris repository. This will have the name:
-
-    :literal:`{major release number}.{minor release number}.x`
-
-for example:
-
-    :literal:`v1.9.x`
-
-This branch shall be used to finalise the release details in preparation for the release candidate.
-
-Release Candidate
-=================
-
-Prior to a release, a release candidate tag may be created, marked as a pre-release in github, with a tag ending with :literal:`rc` followed by a number, e.g.:
-
-    :literal:`v1.9.0rc1`
-
-If created, the pre-release shall be available for at least one week prior to the release being cut. User groups should be notified of the existence of the pre-release and encouraged to test the functionality.
-
-A pre-release is expected for a minor release, but not normally provided for a point release.
-
-If new features are required for a release after a release candidate has been cut, a new pre-release shall be issued first.
-
-Documentation
-=============
-
-The documentation should include all of the what's new snippets, which must be compiled into a what's new document. This content should be reviewed and adapted as required and the snippets removed from the branch to produce a coherent what's new page.
-
-Upon release, the documentation shall be added to the SciTools scitools.org.uk github project's gh-pages branch as the latest documentation.
-
-Testing the Conda Recipe
-========================
-
-Before a release is cut, the SciTools conda-recipes-scitools recipe for Iris shall be tested to build the release branch of Iris; this test recipe shall not be merged onto conda-recipes-scitools.
-
-The Release
-===========
-
-The final steps are to change the version string in the source of :literal:`Iris.__init__.py` and include the release date in the relevant what's new page within the documentation.
-
-Once all checks are complete, the release is cut by the creation of a new tag in the SciTools Iris repository.
-
-Conda Recipe
-============
-
-Once a release is cut, the SciTools conda-recipes-scitools recipe for Iris shall be updated to build the latest release of Iris and push this artefact to anaconda.org.
The build and push is all automated as part of the merge process.
-
-Merge Back
-==========
-
-After the release is cut, the changes shall be merged back onto the scitools master.
-
-To achieve this, first cut a local branch from the release branch, :literal:`{release}.x`. Next add a commit changing the release string to match the release string on scitools/master.
-This branch can now be proposed as a pull request to master. This work flow ensures that the commit identifiers are consistent between the :literal:`.x` branch and :literal:`master`.
-
-Point Releases
-==============
-
-Bug fixes may be implemented and targeted at the :literal:`.x` branch. These should lead to a new point release, another tag.
-For example, a fix for a problem with 1.9.0 will be merged into 1.9.x, and then released by tagging 1.9.1.
-
-New features shall not be included in a point release; point releases are for bug fixes only.
-
-A point release does not require a release candidate, but the rest of the release process is to be followed, including the merge back of changes into :literal:`master`.
-
diff --git a/docs/iris/src/developers_guide/tests.rst b/docs/iris/src/developers_guide/tests.rst
deleted file mode 100644
index 929073b569..0000000000
--- a/docs/iris/src/developers_guide/tests.rst
+++ /dev/null
@@ -1,154 +0,0 @@
-.. _developer_tests:
-
-Testing
-*******
-
-The Iris tests may be run with ``python setup.py test`` which has a
-command line utility included.
-
-There are three categories of tests within Iris:
- - Unit tests
- - Integration tests
- - Legacy tests
-
-Ideally, all code changes should be accompanied by one or more unit
-tests, and by zero or more integration tests. And where possible, new
-tests should not be added to the legacy tests.
-
-But if in any doubt about what tests to add or how to write them please
-feel free to submit a pull-request in any state and ask for assistance.
-
-
-Unit tests
-==========
-
-Code changes should be accompanied by enough unit tests to give a
-high degree of confidence that the change works as expected. In
-addition, the unit tests can help describe the intent behind a change.
-
-The docstring for each test module must state the unit under test.
-For example:
-
-    :literal:`"""Unit tests for the \`iris.experimental.raster.export_geotiff\` function."""`
-
-All unit tests must be placed and named according to the following
-structure:
-
-Classes
--------
-When testing a class all the tests must reside in the module:
-
-    :literal:`lib/iris/tests/unit/<fully qualified module>/test_<ClassName>.py`
-
-Within this test module each tested method must have one or more
-corresponding test classes:
-- Either: `Test_name_of_public_method`
-- Or: `Test_name_of_public_method__aspect_of_method`
-
-And within those test classes, the test methods must be named according
-to the aspect of the tested method which they address.
-
-**Examples**:
-
-All unit tests for :py:class:`iris.cube.Cube` must reside in:
-
-    :literal:`lib/iris/tests/unit/cube/test_Cube.py`
-
-Within that file the tests might look something like:
-
-.. code-block:: python
-
-    # Tests for the Cube.xml() method.
-    class Test_xml(tests.IrisTest):
-        def test_some_general_stuff(self):
-            ...
-
-    # Tests for the Cube.xml() method, focussing on the behaviour of
-    # the checksums.
-    class Test_xml__checksum(tests.IrisTest):
-        def test_checksum_ignores_masked_values(self):
-            ...
-
-    # Tests for the Cube.add_dim_coord() method.
-    class Test_add_dim_coord(tests.IrisTest):
-        def test_normal_usage(self):
-            ...
-
-        def test_coord_already_present(self):
-            ...
- - -Functions ---------- -When testing a function all the tests must reside in the module: - - :literal:`lib/iris/tests/unit//test_.py` - -Within this test module there must be one or more test classes: -- Either: `Test` -- Or: `TestAspectOfFunction` - -And within those test classes, the test methods must be named according -to the aspect of the tested function which they address. - -**Examples**: - -All unit tests for :py:func:`iris.experimental.raster.export_geotiff` -must reside in: - - :literal:`lib/iris/tests/unit/experimental/raster/test_export_geotiff.py` - -Within that file the tests might look something like: - -.. code-block:: python - - # Tests focussing on the handling of different data types. - class TestDtypeAndValues(tests.IrisTest): - def test_int16(self): - ... - - def test_int16_big_endian(self): - ... - - # Tests focussing on the handling of different projections. - class TestProjection(tests.IrisTest): - def test_no_ellipsoid(self): - ... - - -Integration tests -================= - -Some code changes may require tests which exercise several units in -order to demonstrate an important consequence of their interaction which -may not be apparent when considering the units in isolation. - -These tests must be placed in the `lib/iris/tests/integration` folder. -Unlike unit tests, there is no fixed naming scheme for integration -tests. But folders and files must be created as required to help -developers locate relevant tests. It is recommended they are named -according to the capabilities under test, e.g. -`metadata/test_pp_preservation.py`, and not named according to the -module(s) under test. - - -Graphics tests -================= -Certain Iris tests are based on checking plotted images. -This the only way of testing the modules :mod:`iris.plot` and -:mod:`iris.quickplot`, but is also used for some other legacy and integration- -style testcases. - -Prior to Iris version 1.10, a single reference image for each testcase was -stored in the main Iris repository, and a 'tolerant' comparison was performed -against this. - -From version 1.11 onwards, graphics testcase outputs are compared against -possibly *multiple* known-good images, of which only the signature is stored. -This uses a sophisticated perceptual "image hashing" scheme (see: -). -Only imagehash signatures are stored in the Iris repo itself, thus freeing up -valuable space. Meanwhile, the actual reference *images* -- which are required -for human-eyes evaluation of proposed new "good results" -- are all stored -elsewhere in a separate public repository. -See :ref:`developer_graphics_tests`. diff --git a/docs/iris/src/installing.rst b/docs/iris/src/installing.rst deleted file mode 100644 index 6b6999ab82..0000000000 --- a/docs/iris/src/installing.rst +++ /dev/null @@ -1,7 +0,0 @@ -.. _installing_iris: - -**************** -Installing Iris -**************** - -.. include:: ../../../INSTALL diff --git a/docs/iris/src/sphinxext/auto_label_figures.py b/docs/iris/src/sphinxext/auto_label_figures.py deleted file mode 100644 index 00f3aa96dc..0000000000 --- a/docs/iris/src/sphinxext/auto_label_figures.py +++ /dev/null @@ -1,39 +0,0 @@ -# (C) British Crown Copyright 2014 - 2015, Met Office -# -# This file is part of Iris. -# -# Iris is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the -# Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. 
-# -# Iris is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with Iris. If not, see . - -from __future__ import (absolute_import, division, print_function) -from six.moves import (filter, input, map, range, zip) # noqa - -import os -from docutils import nodes - - -def auto_label_figures(app, doctree): - """ - Add a label on every figure. - """ - - for fig in doctree.traverse(condition=nodes.figure): - for img in fig.traverse(condition=nodes.image): - fname, ext = os.path.splitext(img['uri']) - if ext == '.png': - fname = os.path.basename(fname).replace('_', '-') - fig['ids'].append(fname) - - -def setup(app): - app.connect('doctree-read', auto_label_figures) diff --git a/docs/iris/src/sphinxext/custom_class_autodoc.py b/docs/iris/src/sphinxext/custom_class_autodoc.py deleted file mode 100644 index 25c095cb84..0000000000 --- a/docs/iris/src/sphinxext/custom_class_autodoc.py +++ /dev/null @@ -1,97 +0,0 @@ -# (C) British Crown Copyright 2010 - 2019, Met Office -# -# This file is part of Iris. -# -# Iris is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the -# Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Iris is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with Iris. If not, see . 
- -from __future__ import (absolute_import, division, print_function) -from six.moves import (filter, input, map, range, zip) # noqa - -from sphinx.ext import autodoc -from sphinx.ext.autodoc import * -from sphinx.util import force_decode -from sphinx.util.docstrings import prepare_docstring - -import inspect - - -class ClassWithConstructorDocumenter(autodoc.ClassDocumenter): - priority = 1000000 - - def get_object_members(self, want_all): - return autodoc.ClassDocumenter.get_object_members(self, want_all) - - @staticmethod - def can_document_member(member, mname, isattr, self): - return autodoc.ClassDocumenter.can_document_member(member, mname, - isattr, self) - - def get_doc(self, encoding=None): - content = self.env.config.autoclass_content - - docstrings = [] - docstring = self.get_attr(self.object, '__doc__', None) - if docstring: - docstrings.append(docstring) - - # for classes, what the "docstring" is can be controlled via a - # config value; the default is only the class docstring - if content in ('both', 'init'): - constructor = self.get_constructor() - if constructor: - initdocstring = self.get_attr(constructor, '__doc__', None) - else: - initdocstring = None - if initdocstring: - if content == 'init': - docstrings = [initdocstring] - else: - docstrings.append(initdocstring) - - return [prepare_docstring(force_decode(docstring, encoding)) - for docstring in docstrings] - - def get_constructor(self): - # for classes, the relevant signature is the __init__ method's - initmeth = self.get_attr(self.object, '__new__', None) - - if initmeth is None or initmeth is object.__new__ or not \ - (inspect.ismethod(initmeth) or inspect.isfunction(initmeth)): - initmeth = None - - if initmeth is None: - initmeth = self.get_attr(self.object, '__init__', None) - - if initmeth is None or initmeth is object.__init__ or \ - initmeth is object.__new__ or not \ - (inspect.ismethod(initmeth) or inspect.isfunction(initmeth)): - initmeth = None - - return initmeth - - def format_args(self): - initmeth = self.get_constructor() - try: - argspec = inspect.getargspec(initmeth) - except TypeError: - # still not possible: happens e.g. for old-style classes - # with __init__ in C - return None - if argspec[0] and argspec[0][0] in ('cls', 'self'): - del argspec[0][0] - return inspect.formatargspec(*argspec) - - -def setup(app): - app.add_autodocumenter(ClassWithConstructorDocumenter) diff --git a/docs/iris/src/sphinxext/custom_data_autodoc.py b/docs/iris/src/sphinxext/custom_data_autodoc.py deleted file mode 100644 index 29a4fda94c..0000000000 --- a/docs/iris/src/sphinxext/custom_data_autodoc.py +++ /dev/null @@ -1,62 +0,0 @@ -# (C) British Crown Copyright 2010 - 2015, Met Office -# -# This file is part of Iris. -# -# Iris is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the -# Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Iris is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with Iris. If not, see . 
- -from __future__ import (absolute_import, division, print_function) -from six.moves import (filter, input, map, range, zip) # noqa - -from sphinx.ext.autodoc import DataDocumenter, ModuleLevelDocumenter -try: - # Use 'object_description' in place of the former 'safe_repr' function. - from sphinx.util.inspect import object_description as safe_repr -except ImportError: - # 'safe_repr' is the old usage, for Sphinx<1.3. - from sphinx.util.inspect import safe_repr - -from iris.analysis import Aggregator - - -class IrisDataDocumenter(DataDocumenter): - priority = 100 - - def add_directive_header(self, sig): - ModuleLevelDocumenter.add_directive_header(self, sig) - if not self.options.annotation: - try: - objrepr = safe_repr(self.object) - except ValueError: - pass - else: - self.add_line(u' :annotation:', '') - elif self.options.annotation is object(): - pass - else: - self.add_line( - u' :annotation: {}'.format(self.options.annotation), - '') - - -def handler(app, what, name, obj, options, signature, return_annotation): - if what == 'data': - if isinstance(obj, object) and issubclass(obj.__class__, Aggregator): - signature = '()' - return_annotation = '{} instance.'.format(obj.__class__.__name__) - return signature, return_annotation - - -def setup(app): - app.add_autodocumenter(IrisDataDocumenter) - app.connect('autodoc-process-signature', handler) diff --git a/docs/iris/src/sphinxext/gen_example_directory.py b/docs/iris/src/sphinxext/gen_example_directory.py deleted file mode 100644 index 60863010c5..0000000000 --- a/docs/iris/src/sphinxext/gen_example_directory.py +++ /dev/null @@ -1,182 +0,0 @@ -# (C) British Crown Copyright 2010 - 2015, Met Office -# -# This file is part of Iris. -# -# Iris is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the -# Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Iris is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with Iris. If not, see . - - -''' -Generate the rst files for the examples -''' - -from __future__ import (absolute_import, division, print_function) -from six.moves import (filter, input, map, range, zip) # noqa - -import os -import re -import shutil -import sys - - -def out_of_date(original, derived): - ''' - Returns True if derivative is out-of-date wrt original, - both of which are full file paths. - - TODO: this check isn't adequate in some cases, e.g., if we discover - a bug when building the examples, the original and derived will be - unchanged but we still want to force a rebuild. - ''' - return (not os.path.exists(derived) or - os.stat(derived).st_mtime < os.stat(original).st_mtime) - - -docstring_regex = re.compile(r'[\'\"]{3}(.*?)[\'\"]{3}', re.DOTALL) - - -noplot_regex = re.compile(r'#\s*-\*-\s*noplot\s*-\*-') - - -def generate_example_rst(app): - # Example code can be found at the same level as the documentation - # src folder. - rootdir = os.path.join(os.path.dirname(app.builder.srcdir), 'example_code') - - # Examples are built as a subfolder of the src folder. 
- exampledir = os.path.join(app.builder.srcdir, 'examples') - - if not os.path.exists(exampledir): - os.makedirs(exampledir) - - datad = {} - for root, subFolders, files in os.walk(rootdir): - for fname in files: - if (fname.startswith('.') or fname.startswith('#') or - fname.startswith('_') or fname.find('.svn') >= 0 or - not fname.endswith('.py')): - continue - - fullpath = os.path.join(root, fname) - with open(fullpath) as fh: - contents = fh.read() - # indent - relpath = os.path.split(root)[-1] - datad.setdefault(relpath, []).append((fullpath, fname, contents)) - - subdirs = sorted(datad.keys()) - - index = [] - index.append('''\ -Iris examples -============= - -.. toctree:: - :maxdepth: 2 - -''') - - for subdir in subdirs: - rstdir = os.path.join(exampledir, subdir) - if not os.path.exists(rstdir): - os.makedirs(rstdir) - - outputdir = os.path.join(app.builder.outdir, 'examples') - if not os.path.exists(outputdir): - os.makedirs(outputdir) - - outputdir = os.path.join(outputdir, subdir) - if not os.path.exists(outputdir): - os.makedirs(outputdir) - - index.append(' {}/index.rst\n'.format(subdir)) - subdir_root_path = os.path.join(rootdir, subdir) - subdirIndex = [] - - # Use the __init__.py file's docstring for the subdir example page (if - # __init__ exists). - if os.path.exists(os.path.join(subdir_root_path, '__init__.py')): - import imp - mod = imp.load_source( - subdir, - os.path.join(subdir_root_path, '__init__.py')) - subdirIndex.append(mod.__doc__) - else: - line = 'Examples in {}\n'.format(subdir) - subdirIndex.extend([line, '=' * len(line)]) - - # Append the code to produce the toctree. - subdirIndex.append(''' -.. toctree:: - :maxdepth: 1 - -''') - - sys.stdout.write(subdir + ', ') - sys.stdout.flush() - - data = sorted(datad[subdir]) - - for fullpath, fname, contents in data: - basename, ext = os.path.splitext(fname) - outputfile = os.path.join(outputdir, fname) - - rstfile = '{}.rst'.format(basename) - outrstfile = os.path.join(rstdir, rstfile) - - subdirIndex.append(' {}\n'.format(rstfile)) - - if not out_of_date(fullpath, outrstfile): - continue - - out = [] - out.append('.. _{}-{}:\n\n'.format(subdir, basename)) - - # Copy the example code to be in the src examples directory. This - # means we can define a simple relative path in the plot directive, - # which can also copy the file into the resulting build directory. - shutil.copy(fullpath, rstdir) - - docstring_results = docstring_regex.search(contents) - if docstring_results is not None: - out.append(docstring_results.group(1)) - else: - title = '{} example code: {}'.format(subdir, fname) - out.append(title + '\n') - out.append('=' * len(title) + '\n\n') - - if not noplot_regex.search(contents): - rel_example = os.path.relpath(outputfile, app.builder.outdir) - out.append('\n\n.. plot:: {}\n'.format(rel_example)) - out.append(' :include-source:\n\n') - else: - out.append('[`source code <{}>`_]\n\n'.format(fname)) - out.append('.. literalinclude:: {}\n\n'.format(fname)) - # Write the .py file contents (we didn't need to do this for - # plots as the plot directive does this for us.) 
- with open(outputfile, 'w') as fhstatic: - fhstatic.write(contents) - - with open(outrstfile, 'w') as fh: - fh.writelines(out) - - subdirIndexFile = os.path.join(rstdir, 'index.rst') - with open(subdirIndexFile, 'w') as fhsubdirIndex: - fhsubdirIndex.writelines(subdirIndex) - - with open(os.path.join(exampledir, 'index.rst'), 'w') as fhindex: - fhindex.writelines(index) - - -def setup(app): - app.connect('builder-inited', generate_example_rst) diff --git a/docs/iris/src/sphinxext/gen_gallery.py b/docs/iris/src/sphinxext/gen_gallery.py deleted file mode 100644 index 1dabf919f7..0000000000 --- a/docs/iris/src/sphinxext/gen_gallery.py +++ /dev/null @@ -1,204 +0,0 @@ -# -# (C) Copyright 2012 MATPLOTLIB (vn 1.2.0) -# - -''' -Generate a thumbnail gallery of examples. -''' - -from __future__ import (absolute_import, division, print_function) -from six.moves import (filter, input, map, range, zip) # noqa - -import os -import glob -import re -import warnings - -import matplotlib.image as image -from sphinx.util import status_iterator - -from sphinx.util import status_iterator - -template = '''\ -{{% extends "layout.html" %}} -{{% set title = "Thumbnail gallery" %}} - - -{{% block body %}} - -

<h3>Click on any image to see full size image and source code</h3>
    - - - -{} -{{% endblock %}} -''' - -multiimage = re.compile('(.*?)(_\d\d){1,2}') - - -def make_thumbnail(args): - image.thumbnail(args[0], args[1], 0.4) - - -def out_of_date(original, derived): - return (not os.path.exists(derived) or - os.stat(derived).st_mtime < os.stat(original).st_mtime) - - -def gen_gallery(app, doctree): - if app.builder.name != 'html': - return - - outdir = app.builder.outdir - rootdir = 'examples' - - # Images we want to skip for the gallery because they are an unusual - # size that doesn't layout well in a table, or because they may be - # redundant with other images or uninteresting. - skips = set([ - 'mathtext_examples', - 'matshow_02', - 'matshow_03', - 'matplotlib_icon']) - - thumbnails = {} - rows = [] - random_image = [] - toc_rows = [] - - link_template = ('' - '{alternative_text}' - '') - - header_template = ('
'<div class="section" id="{}">' - '<h4>{}' - '<a class="headerlink" href="#{}" ' - 'title="Permalink to this headline">&para;</a></h4>') - - toc_template = ('<li><a class="reference internal" href="#{}">' - '{}' - '</a></li>
  • ') - - random_image_content_template = ''' -// This file was automatically generated by gen_gallery.py & should not be -// modified directly. - -images = new Array(); - -{} - -''' - - random_image_template = "['{thumbfile}', '{full_image}', '{link}'];" - random_image_join = 'images[{}] = {}' - - dirs = ('General', 'Meteorology', 'Oceanography') - - for subdir in dirs: - rows.append(header_template.format(subdir, subdir, subdir)) - toc_rows.append(toc_template.format(subdir, subdir)) - - origdir = os.path.join(os.path.dirname(outdir), rootdir, subdir) - if not os.path.exists(origdir): - origdir = os.path.join(os.path.dirname(outdir), 'plot_directive', - rootdir, subdir) - thumbdir = os.path.join(outdir, rootdir, subdir, 'thumbnails') - if not os.path.exists(thumbdir): - os.makedirs(thumbdir) - - data = [] - - for filename in sorted(glob.glob(os.path.join(origdir, '*.png'))): - if filename.endswith('hires.png'): - continue - - path, filename = os.path.split(filename) - basename, ext = os.path.splitext(filename) - if basename in skips: - continue - - # Create thumbnails based on images in tmpdir, and place them - # within the build tree. - orig_path = str(os.path.join(origdir, filename)) - thumb_path = str(os.path.join(thumbdir, filename)) - if out_of_date(orig_path, thumb_path) or True: - thumbnails[orig_path] = thumb_path - - m = multiimage.match(basename) - if m is not None: - basename = m.group(1) - - data.append((subdir, basename, - os.path.join(rootdir, subdir, 'thumbnails', - filename))) - - for (subdir, basename, thumbfile) in data: - if thumbfile is not None: - anchor = os.path.basename(thumbfile) - anchor = os.path.splitext(anchor)[0].replace('_', '-') - link = 'examples/{}/{}.html#{}'.format( - subdir, - basename, - anchor) - rows.append(link_template.format( - href=link, - thumb_file=thumbfile, - alternative_text=basename)) - random_image.append(random_image_template.format( - link=link, - thumbfile=thumbfile, - basename=basename, - full_image='_images/' + os.path.basename(thumbfile))) - - if len(data) == 0: - warnings.warn('No thumbnails were found in {}'.format(subdir)) - - # Close out the
<div> opened up at the top of this loop. - rows.append('</div>
    ') - - # Generate JS list of images for front page. - random_image_content = '\n'.join([random_image_join.format(i, line) - for i, line in enumerate(random_image)]) - random_image_content = random_image_content_template.format( - random_image_content) - random_image_script_path = os.path.join(app.builder.srcdir, - '_static', - 'random_image.js') - with open(random_image_script_path, 'w') as fh: - fh.write(random_image_content) - - content = template.format('\n'.join(toc_rows), - '\n'.join(rows)) - - # Only write out the file if the contents have actually changed. - # Otherwise, this triggers a full rebuild of the docs. - - gallery_path = os.path.join(app.builder.srcdir, - '_templates', - 'gallery.html') - if os.path.exists(gallery_path): - with open(gallery_path, 'r') as fh: - regenerate = fh.read() != content - else: - regenerate = True - if regenerate: - with open(gallery_path, 'w') as fh: - fh.write(content) - - for key in status_iterator(thumbnails, 'generating thumbnails... ', - length=len(thumbnails)): - image.thumbnail(key, thumbnails[key], 0.3) - - -def setup(app): - app.connect('env-updated', gen_gallery) diff --git a/docs/iris/src/sphinxext/generate_package_rst.py b/docs/iris/src/sphinxext/generate_package_rst.py deleted file mode 100644 index f67efc7ebc..0000000000 --- a/docs/iris/src/sphinxext/generate_package_rst.py +++ /dev/null @@ -1,315 +0,0 @@ -# (C) British Crown Copyright 2010 - 2015, Met Office -# -# This file is part of Iris. -# -# Iris is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the -# Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Iris is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with Iris. If not, see . - -from __future__ import (absolute_import, division, print_function) -from six.moves import (filter, input, map, range, zip) # noqa - -import os -import sys -import re -import inspect - - -document_dict = { - # Use autoclass for classes. - 'class': ''' -{object_docstring} - -.. - - .. autoclass:: {object_name} - :members: - :undoc-members: - :inherited-members: - -''', - 'function': ''' -.. autofunction:: {object_name} - -''', - # For everything else, let automodule do some magic... - None: ''' - -.. autodata:: {object_name} - -'''} - - -horizontal_sep = ''' -.. raw:: html - -

<p><a href="#">&uarr; top &uarr;</a></p>

    - - -''' - - -def lookup_object_type(obj): - if inspect.isclass(obj): - return 'class' - elif inspect.isfunction(obj): - return 'function' - else: - return None - - -def auto_doc_module(file_path, import_name, root_package, - package_toc=None, title=None): - doc = r'''.. _{import_name}: - -{title_underline} -{title} -{title_underline} - -{sidebar} - -.. currentmodule:: {root_package} - -.. automodule:: {import_name} - -In this module: - -{module_elements} - - -''' - if package_toc: - sidebar = ''' -.. sidebar:: Modules in this package - -{package_toc_tree} - - '''.format(package_toc_tree=package_toc) - else: - sidebar = '' - - try: - mod = __import__(import_name) - except ImportError as e: - message = r'''.. error:: - - This module could not be imported. Some dependencies are missing:: - - ''' + str(e) - return doc.format(title=title or import_name, - title_underline='=' * len(title or import_name), - import_name=import_name, root_package=root_package, - sidebar=sidebar, module_elements=message) - - mod = sys.modules[import_name] - elems = dir(mod) - - if '__all__' in elems: - document_these = [(attr_name, getattr(mod, attr_name)) - for attr_name in mod.__all__] - else: - document_these = [(attr_name, getattr(mod, attr_name)) - for attr_name in elems - if (not attr_name.startswith('_') and - not inspect.ismodule(getattr(mod, attr_name)))] - - def is_from_this_module(arg): - name = arg[0] - obj = arg[1] - return (hasattr(obj, '__module__') and - obj.__module__ == mod.__name__) - - sort_order = {'class': 2, 'function': 1} - - # Sort them according to sort_order dict. - def sort_key(arg): - name = arg[0] - obj = arg[1] - return sort_order.get(lookup_object_type(obj), 0) - - document_these = filter(is_from_this_module, document_these) - document_these = sorted(document_these, key=sort_key) - - lines = [] - for element, obj in document_these: - object_name = import_name + '.' + element - obj_content = document_dict[lookup_object_type(obj)].format( - object_name=object_name, - object_name_header_line='+' * len(object_name), - object_docstring=inspect.getdoc(obj)) - lines.append(obj_content) - - lines = horizontal_sep.join(lines) - - module_elements = '\n'.join(' * :py:obj:`{}`'.format(element) - for element, obj in document_these) - - lines = doc + lines - return lines.format(title=title or import_name, - title_underline='=' * len(title or import_name), - import_name=import_name, root_package=root_package, - sidebar=sidebar, module_elements=module_elements) - - -def auto_doc_package(file_path, import_name, root_package, sub_packages): - max_depth = 1 if import_name == 'iris' else 2 - package_toc = '\n '.join(sub_packages) - package_toc = ''' - .. toctree:: - :maxdepth: {:d} - :titlesonly: - - {} - - -'''.format(max_depth, package_toc) - - if '.' in import_name: - title = None - else: - title = import_name.capitalize() + ' reference documentation' - - return auto_doc_module(file_path, import_name, root_package, - package_toc=package_toc, title=title) - - -def auto_package_build(app): - root_package = app.config.autopackage_name - if root_package is None: - raise ValueError('set the autopackage_name variable in the ' - 'conf.py file') - - if not isinstance(root_package, list): - raise ValueError('autopackage was expecting a list of packages to ' - 'document e.g. ["itertools"]') - - for package in root_package: - do_package(package) - - -def do_package(package_name): - out_dir = package_name + os.path.sep - - # Import the root package. If this fails then an import error will be - # raised. 
- module = __import__(package_name) - root_package = package_name - rootdir = os.path.dirname(module.__file__) - - package_folder = [] - module_folders = {} - for root, subFolders, files in os.walk(rootdir): - for fname in files: - name, ext = os.path.splitext(fname) - - # Skip some non-relevant files. - if (fname.startswith('.') or fname.startswith('#') or - re.search('^_[^_]', fname) or fname.find('.svn') >= 0 or - not (ext in ['.py', '.so'])): - continue - - # Handle new shared library naming conventions - if ext == '.so': - name = name.split('.', 1)[0] - - rel_path = root_package + \ - os.path.join(root, fname).split(rootdir)[-1] - mod_folder = root_package + \ - os.path.join(root).split(rootdir)[-1].replace('/', '.') - - # Only add this package to folder list if it contains an __init__ - # script. - if name == '__init__': - package_folder.append([mod_folder, rel_path]) - else: - import_name = mod_folder + '.' + name - mf_list = module_folders.setdefault(mod_folder, []) - mf_list.append((import_name, rel_path)) - if not os.path.exists(out_dir): - os.makedirs(out_dir) - - for package, package_path in package_folder: - if '._' in package or 'test' in package: - continue - - paths = [] - for spackage, spackage_path in package_folder: - # Ignore this packages, packages that are not children of this - # one, test packages, private packages, and packages that are - # subpackages of subpackages (they'll be part of the subpackage). - if spackage == package: - continue - if not spackage.startswith(package): - continue - if spackage.count('.') > package.count('.') + 1: - continue - if 'test' in spackage: - continue - - split_path = spackage.rsplit('.', 2)[-2:] - if any(part[0] == '_' for part in split_path): - continue - - paths.append(os.path.join(*split_path) + '.rst') - - paths.extend(os.path.join(os.path.basename(os.path.dirname(path)), - os.path.basename(path).split('.', 1)[0]) - for imp_name, path in module_folders.get(package, [])) - - paths.sort() - doc = auto_doc_package(package_path, package, root_package, paths) - - package_dir = out_dir + package.replace('.', os.path.sep) - if not os.path.exists(package_dir): - os.makedirs(out_dir + package.replace('.', os.path.sep)) - - out_path = package_dir + '.rst' - if not os.path.exists(out_path): - print('Creating non-existent document {} ...'.format(out_path)) - with open(out_path, 'w') as fh: - fh.write(doc) - else: - with open(out_path, 'r') as fh: - existing_content = ''.join(fh.readlines()) - if doc != existing_content: - print('Creating out of date document {} ...'.format( - out_path)) - with open(out_path, 'w') as fh: - fh.write(doc) - - for import_name, module_path in module_folders.get(package, []): - doc = auto_doc_module(module_path, import_name, root_package) - out_path = out_dir + import_name.replace('.', os.path.sep) + '.rst' - if not os.path.exists(out_path): - print('Creating non-existent document {} ...'.format( - out_path)) - with open(out_path, 'w') as fh: - fh.write(doc) - else: - with open(out_path, 'r') as fh: - existing_content = ''.join(fh.readlines()) - if doc != existing_content: - print('Creating out of date document {} ...'.format( - out_path)) - with open(out_path, 'w') as fh: - fh.write(doc) - - -def setup(app): - app.connect('builder-inited', auto_package_build) - app.add_config_value('autopackage_name', None, 'env') diff --git a/docs/iris/src/userguide/citation.rst b/docs/iris/src/userguide/citation.rst deleted file mode 100644 index 01b655574e..0000000000 --- a/docs/iris/src/userguide/citation.rst +++ /dev/null 
@@ -1,55 +0,0 @@ -.. _Citing_Iris: - -=========== -Citing Iris -=========== - -If Iris played an important part in your research then please add us to your reference list by using one of the recommendations below. - -************ -BibTeX entry -************ - -For example:: - - @manual{Iris, - author = {{Met Office}}, - title = {Iris: A Python library for analysing and visualising meteorological and oceanographic data sets}, - edition = {v1.2}, - year = {2010 - 2013}, - address = {Exeter, Devon }, - url = {http://scitools.org.uk/} - } - - -******************* -Downloaded Software -******************* - -Suggested format:: - - ProductName. Version. ReleaseDate. Publisher. Location. DOIorURL. DownloadDate. - -For example:: - - Iris. v1.2. 28-Feb-2013. Met Office. UK. https://github.com/SciTools/iris/archive/v1.2.0.tar.gz 01-03-2013 - - -******************** -Checked out Software -******************** - -Suggested format:: - - ProductName. Publisher. URL. CheckoutDate. RepositorySpecificCheckoutInformation. - -For example:: - - Iris. Met Office. git@github.com:SciTools/iris.git 06-03-2013 - -.. _How to cite and describe software: http://software.ac.uk/so-exactly-what-software-did-you-use - - -Reference: [Jackson]_. - -.. [Jackson] Jackson, M. 2012. `How to cite and describe software`_. Accessed 06-03-2013. diff --git a/docs/iris/src/userguide/code_maintenance.rst b/docs/iris/src/userguide/code_maintenance.rst deleted file mode 100644 index 00ba30506c..0000000000 --- a/docs/iris/src/userguide/code_maintenance.rst +++ /dev/null @@ -1,60 +0,0 @@ -Code Maintenance -================ - -From a user point of view "code maintenance" means ensuring that your existing -working code stays working, in the face of changes to Iris. - - -Stability and Change ---------------------- - -In practice, as Iris develops, most users will want to periodically upgrade -their installed version to access new features or at least bugfixes. - -This is obvious if you are still developing other code that uses Iris, or using -code from other sources. -However, even if you have only legacy code that remains untouched, some code -maintenance effort is probably still necessary : - - * On the one hand, *in principle*, working code will go on working, as long - as you don't change anything else. - - * However, such "version statis" can easily become a growing burden, if you - are simply waiting until an update becomes unavoidable : Often, that will - eventually occur when you need to update some other software component, - for some completely unconnected reason. - - -Principles of Change Management -------------------------------- - -When you upgrade software to a new version, you often find that you need to -rewrite your legacy code, simply to keep it working. - -In Iris, however, we aim to reduce code maintenance problems to an absolute -minimum by following defined change management rules. -These ensure that, *within a major release number* : - - * you can be confident that your code will still work with subsequent minor - releases - - * you will be aware of future incompatibility problems in advance - - * you can defer making code compatibility changes for some time, until it - suits you - -The above applies to minor version upgrades : e.g. code that works with version -"1.4.2" should still work with a subsequent minor release such as "1.5.0" or -"1.7.2". - -A *major* release however, e.g. 
"v2.0.0" or "v3.0.0", can include more -significant changes, including so-called "breaking" changes: This means that -existing code may need to be modified to make it work with the new version. - -Since breaking change can only occur at major releases, these are the *only* -times we can alter or remove existing behaviours (even deprecated -ones). This is what a major release is for : it enables the removal and -replacement of old features. - -Of course, even at a major release, we do still aim to keep breaking changes to -a minimum. diff --git a/docs/iris/src/userguide/concat.png b/docs/iris/src/userguide/concat.png deleted file mode 100644 index eb3d84046e..0000000000 Binary files a/docs/iris/src/userguide/concat.png and /dev/null differ diff --git a/docs/iris/src/userguide/concat.svg b/docs/iris/src/userguide/concat.svg deleted file mode 100644 index 0234b37bfa..0000000000 --- a/docs/iris/src/userguide/concat.svg +++ /dev/null @@ -1,782 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - image/svg+xml - - - - - - - x - - - - - - - - - - - - - - - - - 0 - - 1 - 2 - 3 - 0 - 1 - 2 - 3 - - - - - - t - y - t - y - - - - - - - - - - - - diff --git a/docs/iris/src/userguide/cube_diagram.dia b/docs/iris/src/userguide/cube_diagram.dia deleted file mode 100644 index 8edc611782..0000000000 Binary files a/docs/iris/src/userguide/cube_diagram.dia and /dev/null differ diff --git a/docs/iris/src/userguide/cube_diagram.png b/docs/iris/src/userguide/cube_diagram.png deleted file mode 100644 index 80f5328c3b..0000000000 Binary files a/docs/iris/src/userguide/cube_diagram.png and /dev/null differ diff --git a/docs/iris/src/userguide/cube_maths.rst b/docs/iris/src/userguide/cube_maths.rst deleted file mode 100644 index 6c6f846bc3..0000000000 --- a/docs/iris/src/userguide/cube_maths.rst +++ /dev/null @@ -1,217 +0,0 @@ -====================== -Basic cube mathematics -====================== - - -The section :doc:`navigating_a_cube` highlighted that -every cube has a data attribute; -this attribute can then be manipulated directly:: - - cube.data -= 273.15 - -The problem with manipulating the data directly is that other metadata may -become inconsistent; in this case the units of the cube are no longer what was -intended. This example could be rectified by changing the units attribute:: - - cube.units = 'celsius' - -.. note:: - - :meth:`iris.cube.Cube.convert_units` can be used to automatically convert a - cube's data and update its units attribute. - So, the two steps above can be achieved by:: - - cube.convert_units('celsius') - -In order to reduce the amount of metadata which becomes inconsistent, -fundamental arithmetic operations such as addition, subtraction, division -and multiplication can be applied directly to any cube. - -Calculating the difference between two cubes --------------------------------------------- - -Let's load some air temperature which runs from 1860 to 2100:: - - filename = iris.sample_data_path('E1_north_america.nc') - air_temp = iris.load_cube(filename, 'air_temperature') - -We can now get the first and last time slices using indexing -(see :ref:`subsetting_a_cube` for a reminder):: - - t_first = air_temp[0, :, :] - t_last = air_temp[-1, :, :] - -.. 
testsetup:: - - filename = iris.sample_data_path('E1_north_america.nc') - air_temp = iris.load_cube(filename, 'air_temperature') - t_first = air_temp[0, :, :] - t_last = air_temp[-1, :, :] - -And finally we can subtract the two. -The result is a cube of the same size as the original two time slices, -but with the data representing their difference: - - >>> print(t_last - t_first) - unknown / (K) (latitude: 37; longitude: 49) - Dimension coordinates: - latitude x - - longitude - x - Scalar coordinates: - forecast_reference_time: 1859-09-01 06:00:00 - height: 1.5 m - - -.. note:: - - Notice that the coordinates "time" and "forecast_period" have been removed - from the resultant cube; - this is because these coordinates differed between the two input cubes. - - -.. _cube-maths_anomaly: - -Calculating a cube anomaly --------------------------- - -In section :doc:`cube_statistics` we discussed how the dimensionality of a cube -can be reduced using the :meth:`Cube.collapsed ` method -to calculate a statistic over a dimension. - -Let's use that method to calculate a mean of our air temperature time-series, -which we'll then use to calculate a time mean anomaly and highlight the powerful -benefits of cube broadcasting. - -First, let's remind ourselves of the shape of our air temperature time-series -cube:: - - >>> print(air_temp.summary(True)) - air_temperature / (K) (time: 240; latitude: 37; longitude: 49) - -Now, we'll calculate the time-series mean using the -:meth:`Cube.collapsed ` method:: - - >>> air_temp_mean = air_temp.collapsed('time', iris.analysis.MEAN) - >>> print(air_temp_mean.summary(True)) - air_temperature / (K) (latitude: 37; longitude: 49) - -As expected the *time* dimension has been collapsed, reducing the -dimensionality of the resultant *air_temp_mean* cube. This time-series mean can -now be used to calculate the time mean anomaly against the original -time-series:: - - >>> anomaly = air_temp - air_temp_mean - >>> print(anomaly.summary(True)) - unknown / (K) (time: 240; latitude: 37; longitude: 49) - -Notice that the calculation of the *anomaly* involves subtracting a -*2d* cube from a *3d* cube to yield a *3d* result. This is only possible -because cube broadcasting is performed during cube arithmetic operations. - -Cube broadcasting follows similar broadcasting rules as -`NumPy `_, but -the additional richness of Iris coordinate meta-data provides an enhanced -capability beyond the basic broadcasting behaviour of NumPy. - -As the coordinate meta-data of a cube uniquely describes each dimension, it is -possible to leverage this knowledge to identify the similar dimensions involved -in a cube arithmetic operation. This essentially means that we are no longer -restricted to performing arithmetic on cubes with identical shapes. - -This extended broadcasting behaviour is highlighted in the following -examples. The first of these shows that it is possible to involve the -transpose of the air temperature time-series in an arithmetic operation with -itself. - -Let's first create the transpose of the air temperature time-series:: - - >>> air_temp_T = air_temp.copy() - >>> air_temp_T.transpose() - >>> print(air_temp_T.summary(True)) - air_temperature / (K) (longitude: 49; latitude: 37; time: 240) - -Now add the transpose to the original time-series:: - - >>> result = air_temp + air_temp_T - >>> print(result.summary(True)) - unknown / (K) (time: 240; latitude: 37; longitude: 49) - -Notice that the *result* is the same dimensionality and shape as *air_temp*. 
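For comparison, here is a small illustrative sketch, using plain NumPy with shapes mirroring the cubes above, of why bare arrays cannot be combined in this way:

.. code-block:: python

    import numpy as np

    # Shapes mirroring the cubes above: (time, latitude, longitude)
    # and its transpose (longitude, latitude, time).
    data = np.arange(240 * 37 * 49).reshape(240, 37, 49)
    data_T = data.transpose()

    # NumPy broadcasting aligns trailing dimensions only, so this fails
    # with "operands could not be broadcast together".
    try:
        data + data_T
    except ValueError as err:
        print(err)

    # The equivalent cube operation succeeds because Iris uses the
    # coordinate metadata to identify corresponding dimensions,
    # regardless of their order.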
-Let's check that the arithmetic operation has calculated a result that -we would intuitively expect:: - - >>> result == 2 * air_temp - True - -Let's extend this example slightly, by taking a slice from the middle -*latitude* dimension of the transpose cube:: - - >>> air_temp_T_slice = air_temp_T[:, 0, :] - >>> print(air_temp_T_slice.summary(True)) - air_temperature / (K) (longitude: 49; time: 240) - -Compared to our original time-series, the *air_temp_T_slice* cube has one -less dimension *and* it's shape if different. However, this doesn't prevent -us from performing cube arithmetic with it, thanks to the extended cube -broadcasting behaviour:: - - >>> result = air_temp - air_temp_T_slice - >>> print(result.summary(True)) - unknown / (K) (time: 240; latitude: 37; longitude: 49) - -Combining multiple phenomena to form a new one ----------------------------------------------- - -Combining cubes of potential-temperature and pressure we can calculate -the associated temperature using the equation: - -.. math:: - - T = \theta (\frac{p}{p_0}) ^ {(287.05 / 1005)} - -Where :math:`p` is pressure, :math:`\theta` is potential temperature, -:math:`p_0` is the potential temperature reference pressure -and :math:`T` is temperature. - -First, let's load pressure and potential temperature cubes:: - - filename = iris.sample_data_path('colpex.pp') - phenomenon_names = ['air_potential_temperature', 'air_pressure'] - pot_temperature, pressure = iris.load_cubes(filename, phenomenon_names) - -In order to calculate :math:`\frac{p}{p_0}` we can define a coordinate which -represents the standard reference pressure of 1000 hPa:: - - import iris.coords - p0 = iris.coords.AuxCoord(1000.0, - long_name='reference_pressure', - units='hPa') - -We must ensure that the units of ``pressure`` and ``p0`` are the same, -so convert the newly created coordinate using -the :meth:`iris.coords.Coord.convert_units` method:: - - p0.convert_units(pressure.units) - -Now we can combine all of this information to calculate the air temperature -using the equation above:: - - temperature = pot_temperature * ( (pressure / p0) ** (287.05 / 1005) ) - -Finally, the cube we have created needs to be given a suitable name:: - - temperature.rename('air_temperature') - -The result could now be plotted using the guidance provided in the -:doc:`plotting_a_cube` section. - -.. htmlonly:: - - A very similar example to this can be found in - :doc:`/examples/Meteorology/deriving_phenomena`. - -.. latexonly:: - - A very similar example to this can be found in the examples section, - with the title "Deriving Exner Pressure and Air Temperature". - diff --git a/docs/iris/src/userguide/cube_statistics.rst b/docs/iris/src/userguide/cube_statistics.rst deleted file mode 100644 index 3ca7d9a2e0..0000000000 --- a/docs/iris/src/userguide/cube_statistics.rst +++ /dev/null @@ -1,334 +0,0 @@ -.. _cube-statistics: - -=============== -Cube statistics -=============== - -.. _cube-statistics-collapsing: - -Collapsing entire data dimensions ---------------------------------- - -.. testsetup:: - - import iris - filename = iris.sample_data_path('uk_hires.pp') - cube = iris.load_cube(filename, 'air_potential_temperature') - - import iris.analysis.cartography - cube.coord('grid_latitude').guess_bounds() - cube.coord('grid_longitude').guess_bounds() - grid_areas = iris.analysis.cartography.area_weights(cube) - - -In the :doc:`subsetting_a_cube` section we saw how to extract a subset of a -cube in order to reduce either its dimensionality or its resolution. 
-Instead of simply extracting a sub-region of the data, -we can produce statistical functions of the data values -across a particular dimension, -such as a 'mean over time' or 'minimum over latitude'. - -.. _cube-statistics_forecast_printout: - -For instance, suppose we have a cube: - - >>> import iris - >>> filename = iris.sample_data_path('uk_hires.pp') - >>> cube = iris.load_cube(filename, 'air_potential_temperature') - >>> print(cube) - air_potential_temperature / (K) (time: 3; model_level_number: 7; grid_latitude: 204; grid_longitude: 187) - Dimension coordinates: - time x - - - - model_level_number - x - - - grid_latitude - - x - - grid_longitude - - - x - Auxiliary coordinates: - forecast_period x - - - - level_height - x - - - sigma - x - - - surface_altitude - - x x - Derived coordinates: - altitude - x x x - Scalar coordinates: - forecast_reference_time: 2009-11-19 04:00:00 - Attributes: - STASH: m01s00i004 - source: Data from Met Office Unified Model - um_version: 7.3 - - -In this case we have a 4 dimensional cube; -to mean the vertical (z) dimension down to a single valued extent -we can pass the coordinate name and the aggregation definition to the -:meth:`Cube.collapsed() ` method: - - >>> import iris.analysis - >>> vertical_mean = cube.collapsed('model_level_number', iris.analysis.MEAN) - >>> print(vertical_mean) - air_potential_temperature / (K) (time: 3; grid_latitude: 204; grid_longitude: 187) - Dimension coordinates: - time x - - - grid_latitude - x - - grid_longitude - - x - Auxiliary coordinates: - forecast_period x - - - surface_altitude - x x - Derived coordinates: - altitude - x x - Scalar coordinates: - forecast_reference_time: 2009-11-19 04:00:00 - level_height: 696.6666 m, bound=(0.0, 1393.3333) m - model_level_number: 10, bound=(1, 19) - sigma: 0.92292976, bound=(0.8458596, 1.0) - Attributes: - STASH: m01s00i004 - source: Data from Met Office Unified Model - um_version: 7.3 - Cell methods: - mean: model_level_number - - -Similarly other analysis operators such as ``MAX``, ``MIN`` and ``STD_DEV`` -can be used instead of ``MEAN``, see :mod:`iris.analysis` for a full list -of currently supported operators. - -For an example of using this functionality, the -:ref:`Hovmoller diagram ` example found -in the gallery takes a zonal mean of an ``XYT`` cube by using the -``collapsed`` method with ``latitude`` and ``iris.analysis.MEAN`` as arguments. - -.. _cube-statistics-collapsing-average: - -Area averaging -^^^^^^^^^^^^^^ - -Some operators support additional keywords to the ``cube.collapsed`` method. -For example, :func:`iris.analysis.MEAN ` supports -a weights keyword which can be combined with -:func:`iris.analysis.cartography.area_weights` to calculate an area average. - -Let's use the same data as was loaded in the previous example. -Since ``grid_latitude`` and ``grid_longitude`` were both point coordinates -we must guess bound positions for them -in order to calculate the area of the grid boxes:: - - import iris.analysis.cartography - cube.coord('grid_latitude').guess_bounds() - cube.coord('grid_longitude').guess_bounds() - grid_areas = iris.analysis.cartography.area_weights(cube) - -These areas can now be passed to the ``collapsed`` method as weights: - -.. 
doctest:: - - >>> new_cube = cube.collapsed(['grid_longitude', 'grid_latitude'], iris.analysis.MEAN, weights=grid_areas) - >>> print(new_cube) - air_potential_temperature / (K) (time: 3; model_level_number: 7) - Dimension coordinates: - time x - - model_level_number - x - Auxiliary coordinates: - forecast_period x - - level_height - x - sigma - x - Derived coordinates: - altitude - x - Scalar coordinates: - forecast_reference_time: 2009-11-19 04:00:00 - grid_latitude: 1.5145501 degrees, bound=(0.14430022, 2.8848) degrees - grid_longitude: 358.74948 degrees, bound=(357.494, 360.00497) degrees - surface_altitude: 399.625 m, bound=(-14.0, 813.25) m - Attributes: - STASH: m01s00i004 - source: Data from Met Office Unified Model - um_version: 7.3 - Cell methods: - mean: grid_longitude, grid_latitude - -Several examples of area averaging exist in the gallery which may be of interest, -including an example on taking a :ref:`global area-weighted mean -`. - -.. _cube-statistics-aggregated-by: - -Partially reducing data dimensions ----------------------------------- - -Instead of completely collapsing a dimension, other methods can be applied -to reduce or filter the number of data points of a particular dimension. - - -Aggregation of grouped data -^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The :meth:`Cube.aggregated_by ` operation -combines data for all points with the same value of a given coordinate. -To do this, you need a coordinate whose points take on only a limited set -of different values -- the *number* of these then determines the size of the -reduced dimension. -The :mod:`iris.coord_categorisation` module can be used to make such -'categorical' coordinates out of ordinary ones: The most common use is -to aggregate data over regular *time intervals*, -such as by calendar month or day of the week. - -For example, let's create two new coordinates on the cube -to represent the climatological seasons and the season year respectively:: - - import iris - import iris.coord_categorisation - - filename = iris.sample_data_path('ostia_monthly.nc') - cube = iris.load_cube(filename, 'surface_temperature') - - iris.coord_categorisation.add_season(cube, 'time', name='clim_season') - iris.coord_categorisation.add_season_year(cube, 'time', name='season_year') - - - -.. note:: - - The 'season year' is not the same as year number, because (e.g.) the months - Dec11, Jan12 + Feb12 all belong to 'DJF-12'. - See :meth:`iris.coord_categorisation.add_season_year`. - - -.. testsetup:: aggregation - - import datetime - import iris - - filename = iris.sample_data_path('ostia_monthly.nc') - cube = iris.load_cube(filename, 'surface_temperature') - - import iris.coord_categorisation - iris.coord_categorisation.add_season(cube, 'time', name='clim_season') - iris.coord_categorisation.add_season_year(cube, 'time', name='season_year') - - annual_seasonal_mean = cube.aggregated_by( - ['clim_season', 'season_year'], - iris.analysis.MEAN) - - -Printing this cube now shows that two extra coordinates exist on the cube: - -.. doctest:: aggregation - - >>> print(cube) - surface_temperature / (K) (time: 54; latitude: 18; longitude: 432) - Dimension coordinates: - time x - - - latitude - x - - longitude - - x - Auxiliary coordinates: - clim_season x - - - forecast_reference_time x - - - season_year x - - - Scalar coordinates: - forecast_period: 0 hours - Attributes: - Conventions: CF-1.5 - STASH: m01s00i024 - Cell methods: - mean: month, year - - -These two coordinates can now be used to aggregate by season and climate-year: - -.. 
doctest:: aggregation - - >>> annual_seasonal_mean = cube.aggregated_by( - ... ['clim_season', 'season_year'], - ... iris.analysis.MEAN) - >>> print(repr(annual_seasonal_mean)) - - -The primary change in the cube is that the cube's data has been -reduced in the 'time' dimension by aggregation (taking means, in this case). -This has collected together all datapoints with the same values of season and -season-year. -The results are now indexed by the 19 different possible values of season and -season-year in a new, reduced 'time' dimension. - -We can see this by printing the first 10 values of season+year -from the original cube: These points are individual months, -so adjacent ones are often in the same season: - -.. doctest:: aggregation - :options: +NORMALIZE_WHITESPACE - - >>> for season, year in zip(cube.coord('clim_season')[:10].points, - ... cube.coord('season_year')[:10].points): - ... print(season + ' ' + str(year)) - mam 2006 - mam 2006 - jja 2006 - jja 2006 - jja 2006 - son 2006 - son 2006 - son 2006 - djf 2007 - djf 2007 - -Compare this with the first 10 values of the new cube's coordinates: -All the points now have distinct season+year values: - -.. doctest:: aggregation - :options: +NORMALIZE_WHITESPACE - - >>> for season, year in zip( - ... annual_seasonal_mean.coord('clim_season')[:10].points, - ... annual_seasonal_mean.coord('season_year')[:10].points): - ... print(season + ' ' + str(year)) - mam 2006 - jja 2006 - son 2006 - djf 2007 - mam 2007 - jja 2007 - son 2007 - djf 2008 - mam 2008 - jja 2008 - -Because the original data started in April 2006 we have some incomplete seasons -(e.g. there were only two months worth of data for 'mam-2006'). -In this case we can fix this by removing all of the resultant 'times' which -do not cover a three month period (note: judged here as > 3*28 days): - -.. doctest:: aggregation - - >>> tdelta_3mth = datetime.timedelta(hours=3*28*24.0) - >>> spans_three_months = lambda t: (t.bound[1] - t.bound[0]) > tdelta_3mth - >>> three_months_bound = iris.Constraint(time=spans_three_months) - >>> full_season_means = annual_seasonal_mean.extract(three_months_bound) - >>> full_season_means - - -The final result now represents the seasonal mean temperature for 17 seasons -from jja-2006 to jja-2010: - -.. doctest:: aggregation - :options: +NORMALIZE_WHITESPACE - - >>> for season, year in zip(full_season_means.coord('clim_season').points, - ... full_season_means.coord('season_year').points): - ... print(season + ' ' + str(year)) - jja 2006 - son 2006 - djf 2007 - mam 2007 - jja 2007 - son 2007 - djf 2008 - mam 2008 - jja 2008 - son 2008 - djf 2009 - mam 2009 - jja 2009 - son 2009 - djf 2010 - mam 2010 - jja 2010 - diff --git a/docs/iris/src/userguide/end_of_userguide.rst b/docs/iris/src/userguide/end_of_userguide.rst deleted file mode 100644 index c8f951a634..0000000000 --- a/docs/iris/src/userguide/end_of_userguide.rst +++ /dev/null @@ -1,15 +0,0 @@ -End of the user guide -===================== - -If this was your first time reading the user guide, we hope you found it enjoyable and informative. -It is advised that you now go back to the :doc:`start ` and try experimenting with your own data. - - - -Iris gallery ------------- -It can be very daunting to start coding a project from an empty file, that is why you will find many in-depth -examples in the Iris gallery which can be used as a goal driven reference to producing your own visualisations. 
- -If you produce a visualisation which you think would add value to the gallery, please get in touch with us and -we will consider including it as an example for all to benefit from. diff --git a/docs/iris/src/userguide/index.rst b/docs/iris/src/userguide/index.rst deleted file mode 100644 index 8c0b24bec3..0000000000 --- a/docs/iris/src/userguide/index.rst +++ /dev/null @@ -1,41 +0,0 @@ -.. _user_guide_index: - -=============== -Iris user guide -=============== - -How to use the user guide ---------------------------- -If you are reading this user guide for the first time it is strongly recommended that you read the user guide -fully before experimenting with your own data files. - - -Much of the content has supplementary links to the reference documentation; you will not need to follow these -links in order to understand the guide but they may serve as a useful reference for future exploration. - -.. htmlonly:: - - Since later pages depend on earlier ones, try reading this user guide sequentially using the ``next`` and ``previous`` links. - - -User guide table of contents -------------------------------- - -.. toctree:: - :maxdepth: 2 - :numbered: - - iris_cubes.rst - loading_iris_cubes.rst - saving_iris_cubes.rst - navigating_a_cube.rst - subsetting_a_cube.rst - real_and_lazy_data.rst - plotting_a_cube.rst - interpolation_and_regridding.rst - merge_and_concat.rst - cube_statistics.rst - cube_maths.rst - citation.rst - code_maintenance.rst - end_of_userguide.rst diff --git a/docs/iris/src/userguide/interpolation_and_regridding.rst b/docs/iris/src/userguide/interpolation_and_regridding.rst deleted file mode 100644 index 565f9b61eb..0000000000 --- a/docs/iris/src/userguide/interpolation_and_regridding.rst +++ /dev/null @@ -1,412 +0,0 @@ -.. _interpolation_and_regridding: - - -.. testsetup:: * - - import numpy as np - import iris - import warnings - warnings.simplefilter('ignore') - -================================= -Cube interpolation and regridding -================================= - -Iris provides powerful cube-aware interpolation and regridding functionality, -exposed through Iris cube methods. This functionality is provided by building -upon existing interpolation schemes implemented by SciPy. - -In Iris we refer to the avaliable types of interpolation and regridding as -`schemes`. The following are the interpolation schemes that are currently -available in Iris: - - * linear interpolation (:class:`iris.analysis.Linear`), and - * nearest-neighbour interpolation (:class:`iris.analysis.Nearest`). - -The following are the regridding schemes that are currently available in Iris: - - * linear regridding (:class:`iris.analysis.Linear`), - * nearest-neighbour regridding (:class:`iris.analysis.Nearest`), and - * area-weighted regridding (:class:`iris.analysis.AreaWeighted`, first-order conservative). - - -.. _interpolation: - -Interpolation -------------- - -Interpolating a cube is achieved with the :meth:`~iris.cube.Cube.interpolate` -method. This method expects two arguments: - - #. the sample points to interpolate, and - #. the second argument being the interpolation scheme to use. - -The result is a new cube, interpolated at the sample points. - -Sample points must be defined as an iterable of ``(coord, value(s))`` pairs. -The `coord` argument can be either a coordinate name or coordinate instance. -The specified coordinate must exist on the cube being interpolated! 
For example: - - * coordinate names and scalar sample points: ``[('latitude', 51.48), ('longitude', 0)]``, - * a coordinate instance and a scalar sample point: ``[(cube.coord('latitude'), 51.48)]``, and - * a coordinate name and a NumPy array of sample points: ``[('longitude', np.linspace(-11, 2, 14))]`` - -are all examples of valid sample points. - -The values for coordinates that correspond to date/times can be supplied as -datetime.datetime or cftime.datetime instances, -e.g. ``[('time', datetime.datetime(2009, 11, 19, 10, 30))]``). - -Let's take the air temperature cube we've seen previously: - - >>> air_temp = iris.load_cube(iris.sample_data_path('air_temp.pp')) - >>> print(air_temp) - air_temperature / (K) (latitude: 73; longitude: 96) - Dimension coordinates: - latitude x - - longitude - x - Scalar coordinates: - forecast_period: 6477 hours, bound=(-28083.0, 6477.0) hours - forecast_reference_time: 1998-03-01 03:00:00 - pressure: 1000.0 hPa - time: 1998-12-01 00:00:00, bound=(1994-12-01 00:00:00, 1998-12-01 00:00:00) - Attributes: - STASH: m01s16i203 - source: Data from Met Office Unified Model - Cell methods: - mean within years: time - mean over years: time - -We can interpolate specific values from the coordinates of the cube: - - >>> sample_points = [('latitude', 51.48), ('longitude', 0)] - >>> print(air_temp.interpolate(sample_points, iris.analysis.Linear())) - air_temperature / (K) (scalar cube) - Scalar coordinates: - forecast_period: 6477 hours, bound=(-28083.0, 6477.0) hours - forecast_reference_time: 1998-03-01 03:00:00 - latitude: 51.48 degrees - longitude: 0 degrees - pressure: 1000.0 hPa - time: 1998-12-01 00:00:00, bound=(1994-12-01 00:00:00, 1998-12-01 00:00:00) - Attributes: - STASH: m01s16i203 - source: Data from Met Office Unified Model - Cell methods: - mean within years: time - mean over years: time - -As we can see, the resulting cube is scalar and has longitude and latitude coordinates with -the values defined in our sample points. - -It isn't necessary to specify sample points for every dimension, only those that you -wish to interpolate over: - - >>> result = air_temp.interpolate([('longitude', 0)], iris.analysis.Linear()) - >>> print('Original: ' + air_temp.summary(shorten=True)) - Original: air_temperature / (K) (latitude: 73; longitude: 96) - >>> print('Interpolated: ' + result.summary(shorten=True)) - Interpolated: air_temperature / (K) (latitude: 73) - -The sample points for a coordinate can be an array of values. When multiple coordinates are -provided with arrays instead of scalar sample points, the coordinates on the resulting cube -will be orthogonal: - - >>> sample_points = [('longitude', np.linspace(-11, 2, 14)), - ... ('latitude', np.linspace(48, 60, 13))] - >>> result = air_temp.interpolate(sample_points, iris.analysis.Linear()) - >>> print(result.summary(shorten=True)) - air_temperature / (K) (latitude: 13; longitude: 14) - - -Interpolating non-horizontal coordinates -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Interpolation in Iris is not limited to horizontal-spatial coordinates - any -coordinate satisfying the prerequisites of the chosen scheme may be interpolated -over. - -For instance, the :class:`iris.analysis.Linear` scheme requires 1D numeric, -monotonic, coordinates. 
Supposing we have a single column cube such as
-the one defined below:
-
-    >>> cube = iris.load_cube(iris.sample_data_path('hybrid_height.nc'), 'air_potential_temperature')
-    >>> column = cube[:, 0, 0]
-    >>> print(column.summary(shorten=True))
-    air_potential_temperature / (K) (model_level_number: 15)
-
-This cube has a "hybrid-height" vertical coordinate system, meaning that the vertical
-coordinate is unevenly spaced in altitude:
-
-    >>> print(column.coord('altitude').points)
-    [ 418.69836 434.5705 456.7928 485.3665 520.2933 561.5752
-      609.2145 663.2141 723.57697 790.30664 863.4072 942.8823
-      1028.737 1120.9764 1219.6051 ]
-
-We could regularise the vertical coordinate by defining 10 equally spaced altitude
-sample points between 400 and 1250 and interpolating our vertical coordinate onto
-these sample points:
-
-    >>> sample_points = [('altitude', np.linspace(400, 1250, 10))]
-    >>> new_column = column.interpolate(sample_points, iris.analysis.Linear())
-    >>> print(new_column.summary(shorten=True))
-    air_potential_temperature / (K) (model_level_number: 10)
-
-Let's look at the original data, the interpolation line and
-the new data in a plot. This will help us to see what is going on:
-
-.. plot:: userguide/regridding_plots/interpolate_column.py
-
-The red diamonds on the extremes of the altitude values show that we have
-extrapolated data beyond the range of the original data. In some cases this is
-desirable but in other cases it is not. For example, this column defines
-a surface altitude value of 414m, so extrapolating an "air potential temperature"
-at 400m makes little physical sense in this case.
-
-We can control the extrapolation mode when defining the interpolation scheme.
-Controlling the extrapolation mode allows us to avoid situations like the above where
-extrapolating values makes little physical sense.
-
-The extrapolation mode is controlled by the ``extrapolation_mode`` keyword.
-For the interpolation schemes available in Iris, the ``extrapolation_mode``
-keyword must be one of:
-
- * ``extrapolate`` -- the extrapolation points will be calculated by extending the gradient of the closest two points,
- * ``error`` -- a ValueError exception will be raised, notifying an attempt to extrapolate,
- * ``nan`` -- the extrapolation points will be set to NaN,
- * ``mask`` -- the extrapolation points will always be masked, even if the source data is not a MaskedArray, or
- * ``nanmask`` -- if the source data is a MaskedArray the extrapolation points will be masked. Otherwise they will be set to NaN.
-
-Using an extrapolation mode is achieved by constructing an interpolation scheme
-with the extrapolation mode keyword set as required. The constructed scheme
-is then passed to the :meth:`~iris.cube.Cube.interpolate` method.
-For example, to mask values that lie beyond the range of the original data:
-
-    >>> scheme = iris.analysis.Linear(extrapolation_mode='mask')
-    >>> new_column = column.interpolate(sample_points, scheme)
-    >>> print(new_column.coord('altitude').points)
-    [-- 494.44451904296875 588.888916015625 683.333251953125 777.77783203125
-     872.2222290039062 966.666748046875 1061.111083984375 1155.555419921875 --]
-
-
-.. _caching_an_interpolator:
-
-Caching an interpolator
-^^^^^^^^^^^^^^^^^^^^^^^
-
-If you need to interpolate a cube on multiple sets of sample points you can
-'cache' an interpolator to be used for each of these interpolations.
This can -shorten the execution time of your code as the most computationally -intensive part of an interpolation is setting up the interpolator. - -To cache an interpolator you must set up an interpolator scheme and call the -scheme's interpolator method. The interpolator method takes as arguments: - - #. a cube to be interpolated, and - #. an iterable of coordinate names or coordinate instances of the coordinates that are to be interpolated over. - -For example: - - >>> air_temp = iris.load_cube(iris.sample_data_path('air_temp.pp')) - >>> interpolator = iris.analysis.Nearest().interpolator(air_temp, ['latitude', 'longitude']) - -When this cached interpolator is called you must pass it an iterable of sample points -that have the same form as the iterable of coordinates passed to the constructor. -So, to use the cached interpolator defined above: - - >>> latitudes = np.linspace(48, 60, 13) - >>> longitudes = np.linspace(-11, 2, 14) - >>> for lat, lon in zip(latitudes, longitudes): - ... result = interpolator([lat, lon]) - -In each case ``result`` will be a cube interpolated from the ``air_temp`` cube we -passed to interpolator. - -Note that you must specify the required extrapolation mode when setting up the cached interpolator. -For example:: - - >>> interpolator = iris.analysis.Nearest(extrapolation_mode='nan').interpolator(cube, coords) - - -.. _regridding: - -Regridding ----------- - -Regridding is conceptually a very similar process to interpolation in Iris. -The primary difference is that interpolation is based on sample points, while -regridding is based on the **horizontal** grid of *another cube*. - -Regridding a cube is achieved with the :meth:`cube.regrid() ` method. -This method expects two arguments: - - #. *another cube* that defines the target grid onto which the cube should be regridded, and - #. the regridding scheme to use. - -.. note:: - - Regridding is a common operation needed to allow comparisons of data on different grids. - The powerful mapping functionality provided by cartopy, however, means that regridding - is often not necessary if performed just for visualisation purposes. - -Let's load two cubes that have different grids and coordinate systems: - - >>> global_air_temp = iris.load_cube(iris.sample_data_path('air_temp.pp')) - >>> rotated_psl = iris.load_cube(iris.sample_data_path('rotated_pole.nc')) - -We can visually confirm that they are on different grids by plotting the two cubes: - -.. plot:: userguide/regridding_plots/regridding_plot.py - -Let's regrid the ``global_air_temp`` cube onto a rotated pole grid -using a linear regridding scheme. To achieve this we pass the ``rotated_psl`` -cube to the regridder to supply the target grid to regrid the ``global_air_temp`` -cube onto: - - >>> rotated_air_temp = global_air_temp.regrid(rotated_psl, iris.analysis.Linear()) - -.. plot:: userguide/regridding_plots/regridded_to_rotated.py - -We could regrid the pressure values onto the global grid, but this will involve -some form of extrapolation. As with interpolation, we can control the extrapolation -mode when defining the regridding scheme. - -For the available regridding schemes in Iris, the ``extrapolation_mode`` keyword -must be one of: - - * ``extrapolate`` -- - - * for :class:`~iris.analysis.Linear` the extrapolation points will be calculated by extending the gradient of the closest two points. - * for :class:`~iris.analysis.Nearest` the extrapolation points will take their value from the nearest source point. 
-
- * ``nan`` -- the extrapolation points will be set to NaN.
- * ``error`` -- a ValueError exception will be raised, notifying an attempt to extrapolate.
- * ``mask`` -- the extrapolation points will always be masked, even if the source data is not a MaskedArray.
- * ``nanmask`` -- if the source data is a MaskedArray the extrapolation points will be masked. Otherwise they will be set to NaN.
-
-The ``rotated_psl`` cube is defined on a limited area rotated pole grid. If we regridded
-the ``rotated_psl`` cube onto the global grid as defined by the ``global_air_temp`` cube
-any linearly extrapolated values would quickly become dominant and highly inaccurate.
-We can control this behaviour by defining the ``extrapolation_mode`` in the constructor
-of the regridding scheme to mask values that lie outside of the domain of the rotated
-pole grid:
-
-    >>> scheme = iris.analysis.Linear(extrapolation_mode='mask')
-    >>> global_psl = rotated_psl.regrid(global_air_temp, scheme)
-
-.. plot:: userguide/regridding_plots/regridded_to_global.py
-
-Notice that although we can still see the approximate shape of the rotated pole grid, the
-cells have now become rectangular in a plate carrée (equirectangular) projection.
-The spatial grid of the resulting cube is really global, with a large proportion of the
-data being masked.
-
-Area-weighted regridding
-^^^^^^^^^^^^^^^^^^^^^^^^
-
-It is often the case that a point-based regridding scheme (such as
-:class:`iris.analysis.Linear` or :class:`iris.analysis.Nearest`) is not
-appropriate when you need to conserve quantities when regridding. The
-:class:`iris.analysis.AreaWeighted` scheme is less general than
-:class:`~iris.analysis.Linear` or :class:`~iris.analysis.Nearest`, but is a
-conservative regridding scheme, meaning that the area-weighted total is
-approximately preserved across grids.
-
-With the :class:`~iris.analysis.AreaWeighted` regridding scheme, each target grid-box's
-data is computed as a weighted mean of all grid-boxes from the source grid. The weighting
-for any given target grid-box is the area of the intersection with each of the
-source grid-boxes. This scheme performs well when regridding from a high
-resolution source grid to a lower resolution target grid, since all source data
-points will be accounted for in the target grid.
-
-Let's demonstrate this with the global air temperature cube we saw previously,
-along with a limited area cube containing total concentration of volcanic ash:
-
-    >>> global_air_temp = iris.load_cube(iris.sample_data_path('air_temp.pp'))
-    >>> print(global_air_temp.summary(shorten=True))
-    air_temperature / (K) (latitude: 73; longitude: 96)
-    >>>
-    >>> regional_ash = iris.load_cube(iris.sample_data_path('NAME_output.txt'))
-    >>> regional_ash = regional_ash.collapsed('flight_level', iris.analysis.SUM)
-    >>> print(regional_ash.summary(shorten=True))
-    VOLCANIC_ASH_AIR_CONCENTRATION / (g/m3) (latitude: 214; longitude: 584)
-
-One of the key limitations of the :class:`~iris.analysis.AreaWeighted`
-regridding scheme is that the two input grids must be defined in the same
-coordinate system as each other. Both input grids must also contain monotonic,
-bounded, 1D spatial coordinates.
-
-.. note::
-
-    The :class:`~iris.analysis.AreaWeighted` regridding scheme requires spatial
-    areas, therefore the longitude and latitude coordinates must be bounded.
- If the longitude and latitude bounds are not defined in the cube we can - guess the bounds based on the coordinates' point values: - - >>> global_air_temp.coord('longitude').guess_bounds() - >>> global_air_temp.coord('latitude').guess_bounds() - -Using NumPy's masked array module we can mask any data that falls below a meaningful -concentration: - - >>> regional_ash.data = np.ma.masked_less(regional_ash.data, 5e-6) - -Finally, we can regrid the data using the :class:`~iris.analysis.AreaWeighted` -regridding scheme: - - >>> scheme = iris.analysis.AreaWeighted(mdtol=0.5) - >>> global_ash = regional_ash.regrid(global_air_temp, scheme) - >>> print(global_ash.summary(shorten=True)) - VOLCANIC_ASH_AIR_CONCENTRATION / (g/m3) (latitude: 73; longitude: 96) - -Note that the :class:`~iris.analysis.AreaWeighted` regridding scheme allows us -to define a missing data tolerance (``mdtol``), which specifies the tolerated -fraction of masked data in any given target grid-box. If the fraction of masked -data within a target grid-box exceeds this value, the data in this target -grid-box will be masked in the result. - -The fraction of masked data is calculated based on the area of masked source -grid-boxes that overlaps with each target grid-box. Defining an ``mdtol`` in the -:class:`~iris.analysis.AreaWeighted` regridding scheme allows fine control -of masked data tolerance. It is worth remembering that defining an ``mdtol`` of -anything other than 1 will prevent the scheme from being fully conservative, as -some data will be disregarded if it lies close to masked data. - -To visualise the above regrid, let's plot the original data, along with 3 distinct -``mdtol`` values to compare the result: - -.. plot:: userguide/regridding_plots/regridded_to_global_area_weighted.py - - -.. _caching_a_regridder: - -Caching a regridder -^^^^^^^^^^^^^^^^^^^ - -If you need to regrid multiple cubes with a common source grid onto a common -target grid you can 'cache' a regridder to be used for each of these regrids. -This can shorten the execution time of your code as the most computationally -intensive part of a regrid is setting up the regridder. - -To cache a regridder you must set up a regridder scheme and call the -scheme's regridder method. The regridder method takes as arguments: - - #. a cube (that is to be regridded) defining the source grid, and - #. a cube defining the target grid to regrid the source cube to. - -For example: - - >>> global_air_temp = iris.load_cube(iris.sample_data_path('air_temp.pp')) - >>> rotated_psl = iris.load_cube(iris.sample_data_path('rotated_pole.nc')) - >>> regridder = iris.analysis.Nearest().regridder(global_air_temp, rotated_psl) - -When this cached regridder is called you must pass it a cube on the same grid -as the source grid cube (in this case ``global_air_temp``) that is to be -regridded to the target grid. For example:: - - >>> for cube in list_of_cubes_on_source_grid: - ... result = regridder(cube) - -In each case ``result`` will be the input cube regridded to the grid defined by -the target grid cube (in this case ``rotated_psl``) that we used to define the -cached regridder. diff --git a/docs/iris/src/userguide/iris_cubes.rst b/docs/iris/src/userguide/iris_cubes.rst deleted file mode 100644 index dc423afba1..0000000000 --- a/docs/iris/src/userguide/iris_cubes.rst +++ /dev/null @@ -1,196 +0,0 @@ -.. _user_guide_introduction: - -=================== -Introduction -=================== - -.. 
_iris_data_structures:
-
-Iris data structures
---------------------
-The top level object in Iris is called a cube. A cube contains data and metadata about a phenomenon.
-
-In Iris, a cube is an interpretation of the *Climate and Forecast (CF) Metadata Conventions* whose purpose is to:
-
-    *require conforming datasets to contain sufficient metadata that they are self-describing... including physical
-    units if appropriate, and that each value can be located in space (relative to earth-based coordinates) and time.*
-
-Whilst the CF conventions are often mentioned alongside NetCDF, Iris implements several major format importers which can take
-files of specific formats and turn them into Iris cubes. Additionally, a framework is provided which allows users
-to extend Iris' import capability to cater for specialist or unimplemented formats.
-
-A single cube describes one and only one phenomenon, and always has a name, a unit and
-an n-dimensional data array to represent the cube's phenomenon. In order to locate the
-data spatially, temporally, or in any other higher-dimensional space, a collection of *coordinates*
-exist on the cube.
-
-
-Coordinates
-===========
-
-A coordinate is a container to store metadata about some dimension(s) of a cube's data array and therefore,
-by definition, its phenomenon.
-
- * Each coordinate has a name and a unit.
- * When a coordinate is added to a cube, the data dimensions that it represents are also provided.
- * The shape of a coordinate is always the same as the shape of the associated data dimension(s) on the cube.
- * A dimension not explicitly listed signifies that the coordinate is independent of that dimension.
- * Each dimension of a coordinate must be mapped to a data dimension. The only coordinates with no mapping are
   scalar coordinates.
-
- * Depending on the underlying data that the coordinate is representing, its values may be discrete points or be
   bounded to represent interval extents (e.g. temperature at *point x* **vs** rainfall accumulation *between 0000-1200 hours*).
- * Coordinates have an attributes dictionary which can hold arbitrary extra metadata, excluding certain restricted CF names.
- * More complex coordinates may contain a coordinate system which is necessary to fully interpret the values
   contained within the coordinate.
-
-There are two classes of coordinates:
-
- **DimCoord**
-
-  * Numeric
-  * Monotonic
-  * Representative of, at most, a single data dimension (1d)
-
- **AuxCoord**
-
-  * May be of any type, including strings
-  * May represent multiple data dimensions (n-dimensional)
-
-
-Cube
-====
-A cube consists of:
-
- * a standard name and/or a long name and an appropriate unit
- * a data array whose values are representative of the phenomenon
- * a collection of coordinates and associated data dimensions on the cube's data array, which are split into two separate lists:
-
-   * *dimension coordinates* - DimCoords which uniquely map to exactly one data dimension, ordered by dimension.
-   * *auxiliary coordinates* - DimCoords or AuxCoords which map to as many data dimensions as the coordinate has dimensions.
-
- * an attributes dictionary which, other than some protected CF names, can hold arbitrary extra metadata.
- * a list of cell methods to represent operations which have already been applied to the data (e.g.
"mean over time") - * a list of coordinate "factories" used for deriving coordinates from the values of other coordinates in the cube - - -Cubes in practice ------------------ - - -A simple cube example -===================== - -Suppose we have some gridded data which has 24 air temperature readings (in Kelvin) which is located at -4 different longitudes, 2 different latitudes and 3 different heights. Our data array can be represented pictorially: - -.. image:: multi_array.png - -Where dimensions 0, 1, and 2 have lengths 3, 2 and 4 respectively. - -The Iris cube to represent this data would consist of: - - * a standard name of ``air_temperature`` and a unit of ``kelvin`` - * a data array of shape ``(3, 2, 4)`` - * a coordinate, mapping to dimension 0, consisting of: - - * a standard name of ``height`` and unit of ``meters`` - * an array of length 3 representing the 3 ``height`` points - - * a coordinate, mapping to dimension 1, consisting of: - - * a standard name of ``latitude`` and unit of ``degrees`` - * an array of length 2 representing the 2 latitude points - * a coordinate system such that the ``latitude`` points could be fully located on the globe - - * a coordinate, mapping to dimension 2, consisting of: - - * a standard name of ``longitude`` and unit of ``degrees`` - * an array of length 4 representing the 4 longitude points - * a coordinate system such that the ``longitude`` points could be fully located on the globe - - - - -Pictorially the cube has taken on more information than a simple array: - - -.. image:: multi_array_to_cube.png - - -Additionally further information may be optionally attached to the cube. -For example, it is possible to attach any of the following: - - * a coordinate, not mapping to any data dimensions, consisting of: - - * a standard name of ``time`` and unit of ``days since 2000-01-01 00:00`` - * a data array of length 1 representing the time that the data array is valid for - - * an auxiliary coordinate, mapping to dimensions 1 and 2, consisting of: - - * a long name of ``place name`` and no unit - * a 2d string array of shape ``(2, 4)`` with the names of the 8 places that the lat/lons correspond to - - * an auxiliary coordinate "factory", which can derive its own mapping, consisting of: - - * a standard name of ``height`` and a unit of ``feet`` - * knowledge of how data values for this coordinate can be calculated given the ``height in meters`` coordinate - - * a cell method of "mean" over "ensemble" to indicate that the data has been meaned over - a collection of "ensembles" (i.e. multiple model runs). - - -Printing a cube -=============== - -Every Iris cube can be printed to screen as you will see later in the user guide. It is worth familiarising yourself with the -output as this is the quickest way of inspecting the contents of a cube. Here is the result of printing a real life cube: - -.. _hybrid_cube_printout: - -.. testcode:: - :hide: - - import iris - filename = iris.sample_data_path('uk_hires.pp') - # NOTE: Every time the output of this cube changes, the full list of deductions below should be re-assessed. - print(iris.load_cube(filename, 'air_potential_temperature')) - -.. 
testoutput:: - - air_potential_temperature / (K) (time: 3; model_level_number: 7; grid_latitude: 204; grid_longitude: 187) - Dimension coordinates: - time x - - - - model_level_number - x - - - grid_latitude - - x - - grid_longitude - - - x - Auxiliary coordinates: - forecast_period x - - - - level_height - x - - - sigma - x - - - surface_altitude - - x x - Derived coordinates: - altitude - x x x - Scalar coordinates: - forecast_reference_time: 2009-11-19 04:00:00 - Attributes: - STASH: m01s00i004 - source: Data from Met Office Unified Model - um_version: 7.3 - - -Using this output we can deduce that: - - * The cube represents air potential temperature. - * There are 4 data dimensions, and the data has a shape of ``(3, 7, 204, 187)`` - * The 4 data dimensions are mapped to the ``time``, ``model_level_number``, - ``grid_latitude``, ``grid_longitude`` coordinates respectively - * There are three 1d auxiliary coordinates and one 2d auxiliary (``surface_altitude``) - * There is a single ``altitude`` derived coordinate, which spans 3 data dimensions - * There are 7 distinct values in the "model_level_number" coordinate. Similar inferences can - be made for the other dimension coordinates. - * There are 7, not necessarily distinct, values in the ``level_height`` coordinate. - * There is a single ``forecast_reference_time`` scalar coordinate representing the entire cube. - * The cube has one further attribute relating to the phenomenon. - In this case the originating file format, PP, encodes information in a STASH code which in some cases can - be useful for identifying advanced experiment information relating to the phenomenon. diff --git a/docs/iris/src/userguide/loading_iris_cubes.rst b/docs/iris/src/userguide/loading_iris_cubes.rst deleted file mode 100644 index 2cb3b9b259..0000000000 --- a/docs/iris/src/userguide/loading_iris_cubes.rst +++ /dev/null @@ -1,465 +0,0 @@ -.. _loading_iris_cubes: - -=================== -Loading Iris cubes -=================== - -To load a single file into a **list** of Iris cubes -the :py:func:`iris.load` function is used:: - - import iris - filename = '/path/to/file' - cubes = iris.load(filename) - -Iris will attempt to return **as few cubes as possible** -by collecting together multiple fields with a shared standard name -into a single multidimensional cube. - -The :py:func:`iris.load` function automatically recognises the format -of the given files and attempts to produce Iris Cubes from their contents. - -.. note:: - - Currently there is support for CF NetCDF, GRIB 1 & 2, PP and FieldsFiles - file formats with a framework for this to be extended to custom formats. - - -In order to find out what has been loaded, the result can be printed: - - >>> import iris - >>> filename = iris.sample_data_path('uk_hires.pp') - >>> cubes = iris.load(filename) - >>> print(cubes) - 0: air_potential_temperature / (K) (time: 3; model_level_number: 7; grid_latitude: 204; grid_longitude: 187) - 1: surface_altitude / (m) (grid_latitude: 204; grid_longitude: 187) - - -This shows that there were 2 cubes as a result of loading the file, they were: -``air_potential_temperature`` and ``surface_altitude``. - -The ``surface_altitude`` cube was 2 dimensional with: - * the two dimensions have extents of 204 and 187 respectively and are - represented by the ``grid_latitude`` and ``grid_longitude`` coordinates. 
-
-The ``air_potential_temperature`` cube was 4 dimensional with:
- * the same length ``grid_latitude`` and ``grid_longitude`` dimensions as
-   ``surface_altitude``
- * a ``time`` dimension of length 3
- * a ``model_level_number`` dimension of length 7
-
-.. note::
-
-    The result of :func:`iris.load` is **always** a
-    :class:`list of cubes <iris.cube.CubeList>`.
-    Anything that can be done with a Python :class:`list` can be done
-    with the resultant list of cubes. It is worth noting, however, that
-    there is no inherent order to this
-    :class:`list of cubes <iris.cube.CubeList>`.
-    Because of this, indexing may be inconsistent. A more consistent way to
-    extract a cube is by using the :class:`iris.Constraint` class as
-    described in :ref:`constrained-loading`.
-
-.. hint::
-
-    Throughout this user guide you will see the function
-    ``iris.sample_data_path`` being used to get the filename for the resources
-    used in the examples. The result of this function is just a string.
-
-    Using this function allows us to provide examples which will work
-    across platforms and with data installed in different locations,
-    however in practice you will want to use your own strings::
-
-        filename = '/path/to/file'
-        cubes = iris.load(filename)
-
-To get the air potential temperature cube from the list of cubes
-returned by :py:func:`iris.load` in the previous example,
-list indexing can be used:
-
-    >>> import iris
-    >>> filename = iris.sample_data_path('uk_hires.pp')
-    >>> cubes = iris.load(filename)
-    >>> # get the first cube (list indexing is 0 based)
-    >>> air_potential_temperature = cubes[0]
-    >>> print(air_potential_temperature)
-    air_potential_temperature / (K) (time: 3; model_level_number: 7; grid_latitude: 204; grid_longitude: 187)
-        Dimension coordinates:
-            time                        x  -  -  -
-            model_level_number          -  x  -  -
-            grid_latitude               -  -  x  -
-            grid_longitude              -  -  -  x
-        Auxiliary coordinates:
-            forecast_period             x  -  -  -
-            level_height                -  x  -  -
-            sigma                       -  x  -  -
-            surface_altitude            -  -  x  x
-        Derived coordinates:
-            altitude                    -  x  x  x
-        Scalar coordinates:
-            forecast_reference_time: 2009-11-19 04:00:00
-        Attributes:
-            STASH: m01s00i004
-            source: Data from Met Office Unified Model
-            um_version: 7.3
-
-Notice that the result of printing a **cube** is a little more verbose than
-it was when printing a **list of cubes**. In addition to the very short summary
-which is provided when printing a list of cubes, information is provided
-on the coordinates which constitute the cube in question.
-This was the output discussed at the end of the :doc:`iris_cubes` section.
-
-.. note::
-
-    Dimensioned coordinates will have a dimension marker ``x`` in the
-    appropriate column for each cube data dimension that they describe.
-
-
-Loading multiple files
------------------------
-
-To load more than one file into a list of cubes, a list of filenames can be
-provided to :py:func:`iris.load`::
-
-    filenames = [iris.sample_data_path('uk_hires.pp'),
-                 iris.sample_data_path('air_temp.pp')]
-    cubes = iris.load(filenames)
-
-
-It is also possible to load one or more files with wildcard substitution
-using the expansion rules defined by :py:mod:`fnmatch`.
-
-For example, to match **zero or more characters** in the filename,
-star wildcards can be used::
-
-    filename = iris.sample_data_path('GloSea4', '*.pp')
-    cubes = iris.load(filename)
-
-
-.. note::
-
-    The cubes returned will not necessarily be in the same order as the
-    order of the filenames.
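-
-As a minimal sketch of this (reusing the sample files shown above), selecting
-the required cube by a name constraint, rather than by its position in the
-returned list, keeps a multi-file load deterministic::
-
-    import iris
-
-    filenames = [iris.sample_data_path('uk_hires.pp'),
-                 iris.sample_data_path('air_temp.pp')]
-
-    # Constrain by name, so the result does not depend on the
-    # (unspecified) ordering of the loaded cubes.
-    air_temp = iris.load_cube(filenames, 'air_temperature')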
- -Lazy loading ------------- - -In fact when Iris loads data from most file types, it normally only reads the -essential descriptive information or metadata : the bulk of the actual data -content will only be loaded later, as it is needed. -This is referred to as 'lazy' data. It allows loading to be much quicker, and to occupy less memory. - -For more on the benefits, handling and uses of lazy data, see :doc:`Real and Lazy Data `. - - -.. _constrained-loading: - -Constrained loading ------------------------ -Given a large dataset, it is possible to restrict or constrain the load -to match specific Iris cube metadata. -Constrained loading provides the ability to generate a cube -from a specific subset of data that is of particular interest. - -As we have seen, loading the following file creates several Cubes:: - - filename = iris.sample_data_path('uk_hires.pp') - cubes = iris.load(filename) - -Specifying a name as a constraint argument to :py:func:`iris.load` will mean -only cubes with a matching :meth:`name ` -will be returned:: - - filename = iris.sample_data_path('uk_hires.pp') - cubes = iris.load(filename, 'specific_humidity') - -To constrain the load to multiple distinct constraints, a list of constraints -can be provided. This is equivalent to running load once for each constraint -but is likely to be more efficient:: - - filename = iris.sample_data_path('uk_hires.pp') - cubes = iris.load(filename, ['air_potential_temperature', 'specific_humidity']) - -The :class:`iris.Constraint` class can be used to restrict coordinate values -on load. For example, to constrain the load to match -a specific ``model_level_number``:: - - filename = iris.sample_data_path('uk_hires.pp') - level_10 = iris.Constraint(model_level_number=10) - cubes = iris.load(filename, level_10) - -Constraints can be combined using ``&`` to represent a more restrictive -constraint to ``load``:: - - filename = iris.sample_data_path('uk_hires.pp') - forecast_6 = iris.Constraint(forecast_period=6) - level_10 = iris.Constraint(model_level_number=10) - cubes = iris.load(filename, forecast_6 & level_10) - -As well as being able to combine constraints using ``&``, -the :class:`iris.Constraint` class can accept multiple arguments, -and a list of values can be given to constrain a coordinate to one of -a collection of values:: - - filename = iris.sample_data_path('uk_hires.pp') - level_10_or_16_fp_6 = iris.Constraint(model_level_number=[10, 16], forecast_period=6) - cubes = iris.load(filename, level_10_or_16_fp_6) - -A common requirement is to limit the value of a coordinate to a specific range, -this can be achieved by passing the constraint a function:: - - def bottom_16_levels(cell): - # return True or False as to whether the cell in question should be kept - return cell <= 16 - - filename = iris.sample_data_path('uk_hires.pp') - level_lt_16 = iris.Constraint(model_level_number=bottom_16_levels) - cubes = iris.load(filename, level_lt_16) - -.. note:: - - As with many of the examples later in this documentation, the - simple function above can be conveniently written as a lambda function - on a single line:: - - bottom_16_levels = lambda cell: cell <= 16 - - -Note also the :ref:`warning on equality constraints with floating point coordinates `. - - -Cube attributes can also be part of the constraint criteria. 
Supposing a
-cube attribute of ``STASH`` existed, as is the case when loading ``PP`` files,
-then specific STASH codes can be filtered::
-
-    filename = iris.sample_data_path('uk_hires.pp')
-    level_10_with_stash = iris.AttributeConstraint(STASH='m01s00i004') & iris.Constraint(model_level_number=10)
-    cubes = iris.load(filename, level_10_with_stash)
-
-.. seealso::
-
-    For advanced usage there are further examples in the
-    :class:`iris.Constraint` reference documentation.
-
-
-Constraining a circular coordinate across its boundary
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-Occasionally you may need to constrain your cube with a region that crosses the
-boundary of a circular coordinate (this is often the meridian or the dateline /
-antimeridian). An example use-case of this is to extract the entire Pacific Ocean
-from a cube whose longitudes are bounded by the dateline.
-
-This functionality cannot be provided reliably using constraints. Instead you should use the
-functionality provided by :meth:`cube.intersection <iris.cube.Cube.intersection>`
-to extract this region.
-
-
-.. _using-time-constraints:
-
-Constraining on Time
-^^^^^^^^^^^^^^^^^^^^
-Iris follows NetCDF-CF rules in representing time coordinate values as purely
-numeric values, normalised by the calendar specified in the coordinate's
-units (e.g. "days since 1970-01-01").
-However, when constraining by time we usually want to test calendar-related
-aspects such as hours of the day or months of the year, so Iris
-provides special features to facilitate this:
-
-Firstly, when Iris evaluates Constraint expressions, it will convert time-coordinate
-values (points and bounds) from numbers into :class:`~datetime.datetime`-like objects
-for ease of calendar-based testing.
-
-    >>> filename = iris.sample_data_path('uk_hires.pp')
-    >>> cube_all = iris.load_cube(filename, 'air_potential_temperature')
-    >>> print('All times :\n' + str(cube_all.coord('time')))
-    All times :
-    DimCoord([2009-11-19 10:00:00, 2009-11-19 11:00:00, 2009-11-19 12:00:00], standard_name='time', calendar='gregorian')
-    >>> # Define a function which accepts a datetime as its argument (this is simplified in later examples).
-    >>> hour_11 = iris.Constraint(time=lambda cell: cell.point.hour == 11)
-    >>> cube_11 = cube_all.extract(hour_11)
-    >>> print('Selected times :\n' + str(cube_11.coord('time')))
-    Selected times :
-    DimCoord([2009-11-19 11:00:00], standard_name='time', calendar='gregorian')
-
-Secondly, the :class:`iris.time` module provides flexible time comparison
-facilities. An :class:`iris.time.PartialDateTime` object can be compared to
-objects such as :class:`datetime.datetime` instances, and this comparison will
-then test only those 'aspects' which the PartialDateTime instance defines:
-
-    >>> import datetime
-    >>> from iris.time import PartialDateTime
-    >>> dt = datetime.datetime(2011, 3, 7)
-    >>> print(dt > PartialDateTime(year=2010, month=6))
-    True
-    >>> print(dt > PartialDateTime(month=6))
-    False
-    >>>
-
-These two facilities can be combined to provide straightforward calendar-based
-time selections when loading or extracting data.
-
-The previous constraint example can now be written as:
-
-    >>> the_11th_hour = iris.Constraint(time=iris.time.PartialDateTime(hour=11))
-    >>> print(iris.load_cube(
-    ...     iris.sample_data_path('uk_hires.pp'),
-    ...
'air_potential_temperature' & the_11th_hour).coord('time')) - DimCoord([2009-11-19 11:00:00], standard_name='time', calendar='gregorian') - -It is common that a cube will need to be constrained between two given dates. -In the following example we construct a time sequence representing the first -day of every week for many years: - -.. testsetup:: timeseries_range - - import datetime - import numpy as np - from iris.time import PartialDateTime - long_ts = iris.cube.Cube(np.arange(150), long_name='data', units='1') - _mondays = iris.coords.DimCoord(7 * np.arange(150), standard_name='time', units='days since 2007-04-09') - long_ts.add_dim_coord(_mondays, 0) - - -.. doctest:: timeseries_range - :options: +NORMALIZE_WHITESPACE, +ELLIPSIS - - >>> print(long_ts.coord('time')) - DimCoord([2007-04-09 00:00:00, 2007-04-16 00:00:00, 2007-04-23 00:00:00, - ... - 2010-02-01 00:00:00, 2010-02-08 00:00:00, 2010-02-15 00:00:00], - standard_name='time', calendar='gregorian') - -Given two dates in datetime format, we can select all points between them. - -.. doctest:: timeseries_range - :options: +NORMALIZE_WHITESPACE, +ELLIPSIS - - >>> d1 = datetime.datetime.strptime('20070715T0000Z', '%Y%m%dT%H%MZ') - >>> d2 = datetime.datetime.strptime('20070825T0000Z', '%Y%m%dT%H%MZ') - >>> st_swithuns_daterange_07 = iris.Constraint( - ... time=lambda cell: d1 <= cell.point < d2) - >>> within_st_swithuns_07 = long_ts.extract(st_swithuns_daterange_07) - >>> print(within_st_swithuns_07.coord('time')) - DimCoord([2007-07-16 00:00:00, 2007-07-23 00:00:00, 2007-07-30 00:00:00, - 2007-08-06 00:00:00, 2007-08-13 00:00:00, 2007-08-20 00:00:00], - standard_name='time', calendar='gregorian') - -Alternatively, we may rewrite this using :class:`iris.time.PartialDateTime` -objects. - -.. doctest:: timeseries_range - :options: +NORMALIZE_WHITESPACE, +ELLIPSIS - - >>> pdt1 = PartialDateTime(year=2007, month=7, day=15) - >>> pdt2 = PartialDateTime(year=2007, month=8, day=25) - >>> st_swithuns_daterange_07 = iris.Constraint( - ... time=lambda cell: pdt1 <= cell.point < pdt2) - >>> within_st_swithuns_07 = long_ts.extract(st_swithuns_daterange_07) - >>> print(within_st_swithuns_07.coord('time')) - DimCoord([2007-07-16 00:00:00, 2007-07-23 00:00:00, 2007-07-30 00:00:00, - 2007-08-06 00:00:00, 2007-08-13 00:00:00, 2007-08-20 00:00:00], - standard_name='time', calendar='gregorian') - -A more complex example might require selecting points over an annually repeating -date range. We can select points within a certain part of the year, in this case -between the 15th of July through to the 25th of August. By making use of -PartialDateTime this becomes simple: - -.. doctest:: timeseries_range - - >>> st_swithuns_daterange = iris.Constraint( - ... time=lambda cell: PartialDateTime(month=7, day=15) <= cell < PartialDateTime(month=8, day=25)) - >>> within_st_swithuns = long_ts.extract(st_swithuns_daterange) - ... - >>> print(within_st_swithuns.coord('time')) - DimCoord([2007-07-16 00:00:00, 2007-07-23 00:00:00, 2007-07-30 00:00:00, - 2007-08-06 00:00:00, 2007-08-13 00:00:00, 2007-08-20 00:00:00, - 2008-07-21 00:00:00, 2008-07-28 00:00:00, 2008-08-04 00:00:00, - 2008-08-11 00:00:00, 2008-08-18 00:00:00, 2009-07-20 00:00:00, - 2009-07-27 00:00:00, 2009-08-03 00:00:00, 2009-08-10 00:00:00, - 2009-08-17 00:00:00, 2009-08-24 00:00:00], standard_name='time', calendar='gregorian') - -Notice how the dates printed are between the range specified in the ``st_swithuns_daterange`` -and that they span multiple years. 
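-
-The same calendar-based testing extends naturally to other repeating periods.
-As a minimal sketch (reusing the ``long_ts`` cube from the examples above, and
-relying only on the ``cell.point`` conversion described earlier), all points
-falling in December, January or February of any year could be selected with::
-
-    # Keep any point whose month is December, January or February,
-    # whatever the year.
-    djf = iris.Constraint(time=lambda cell: cell.point.month in (12, 1, 2))
-    within_djf = long_ts.extract(djf)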
- - -Strict loading --------------- - -The :py:func:`iris.load_cube` and :py:func:`iris.load_cubes` functions are -similar to :py:func:`iris.load` except they can only return -*one cube per constraint*. -The :func:`iris.load_cube` function accepts a single constraint and -returns a single cube. The :func:`iris.load_cubes` function accepts any -number of constraints and returns a list of cubes (as an `iris.cube.CubeList`). -Providing no constraints to :func:`iris.load_cube` or :func:`iris.load_cubes` -is equivalent to requesting exactly one cube of any type. - -A single cube is loaded in the following example:: - - >>> filename = iris.sample_data_path('air_temp.pp') - >>> cube = iris.load_cube(filename) - >>> print(cube) - air_temperature / (K) (latitude: 73; longitude: 96) - Dimension coordinates: - latitude x - - longitude - x - ... - Cell methods: - mean: time - -However, when attempting to load data which would result in anything other than -one cube, an exception is raised:: - - >>> filename = iris.sample_data_path('uk_hires.pp') - >>> cube = iris.load_cube(filename) - Traceback (most recent call last): - ... - iris.exceptions.ConstraintMismatchError: Expected exactly one cube, found 2. - -.. note:: - - All the load functions share many of the same features, hence - multiple files could be loaded with wildcard filenames - or by providing a list of filenames. - -The strict nature of :func:`iris.load_cube` and :func:`iris.load_cubes` -means that, when combined with constrained loading, it is possible to -ensure that precisely what was asked for on load is given -- otherwise an exception is raised. -This fact can be utilised to make code only run successfully if -the data provided has the expected criteria. - -For example, suppose that code needed ``air_potential_temperature`` -in order to run:: - - import iris - filename = iris.sample_data_path('uk_hires.pp') - air_pot_temp = iris.load_cube(filename, 'air_potential_temperature') - print(air_pot_temp) - -Should the file not produce exactly one cube with a standard name of -'air_potential_temperature', an exception will be raised. - -Similarly, supposing a routine needed both 'surface_altitude' and -'air_potential_temperature' to be able to run:: - - import iris - filename = iris.sample_data_path('uk_hires.pp') - altitude_cube, pot_temp_cube = iris.load_cubes(filename, ['surface_altitude', 'air_potential_temperature']) - -The result of :func:`iris.load_cubes` in this case will be a list of 2 cubes -ordered by the constraints provided. Multiple assignment has been used to put -these two cubes into separate variables. - -.. 
note::
-
-    In Python, lists of a pre-known length and order can be exploited
-    using *multiple assignment*:
-
-        >>> number_one, number_two = [1, 2]
-        >>> print(number_one)
-        1
-        >>> print(number_two)
-        2
-
diff --git a/docs/iris/src/userguide/merge.png b/docs/iris/src/userguide/merge.png
deleted file mode 100644
index cafaa370da..0000000000
Binary files a/docs/iris/src/userguide/merge.png and /dev/null differ
diff --git a/docs/iris/src/userguide/merge.svg b/docs/iris/src/userguide/merge.svg
deleted file mode 100644
index 9326bc332b..0000000000
--- a/docs/iris/src/userguide/merge.svg
+++ /dev/null
@@ -1,714 +0,0 @@
[SVG markup stripped in extraction; recoverable labels: x, y, z axes and indices 0-3]
diff --git a/docs/iris/src/userguide/merge_and_concat.png b/docs/iris/src/userguide/merge_and_concat.png
deleted file mode 100644
index 48238287b4..0000000000
Binary files a/docs/iris/src/userguide/merge_and_concat.png and /dev/null differ
diff --git a/docs/iris/src/userguide/multi_array.png b/docs/iris/src/userguide/multi_array.png
deleted file mode 100644
index 54a2688d2a..0000000000
Binary files a/docs/iris/src/userguide/multi_array.png and /dev/null differ
diff --git a/docs/iris/src/userguide/multi_array.svg b/docs/iris/src/userguide/multi_array.svg
deleted file mode 100644
index d28f6d71d6..0000000000
--- a/docs/iris/src/userguide/multi_array.svg
+++ /dev/null
@@ -1,455 +0,0 @@
[SVG markup stripped in extraction; recoverable labels: dimensions 2, 1, 0]
diff --git a/docs/iris/src/userguide/multi_array_to_cube.png b/docs/iris/src/userguide/multi_array_to_cube.png
deleted file mode 100644
index 1144ee6715..0000000000
Binary files a/docs/iris/src/userguide/multi_array_to_cube.png and /dev/null differ
diff --git a/docs/iris/src/userguide/multi_array_to_cube.svg b/docs/iris/src/userguide/multi_array_to_cube.svg
deleted file mode 100644
index a2fc2f5e26..0000000000
--- a/docs/iris/src/userguide/multi_array_to_cube.svg
+++ /dev/null
@@ -1,1378 +0,0 @@
[SVG markup stripped in extraction; recoverable labels: Longitude (degrees), Latitude (degrees), Height (meters), Air temperature (kelvin)]
diff --git a/docs/iris/src/userguide/plotting_examples/1d_quickplot_simple.py b/docs/iris/src/userguide/plotting_examples/1d_quickplot_simple.py
deleted file mode 100644
index 75462101a0..0000000000
--- a/docs/iris/src/userguide/plotting_examples/1d_quickplot_simple.py
+++ /dev/null
@@ -1,18 +0,0 @@
-
-from __future__ import (absolute_import, division, print_function)
-from six.moves import (filter, input, map, range, zip)  # noqa
-
-import matplotlib.pyplot as plt
-
-import iris
-import iris.quickplot as qplt
-
-
-fname = iris.sample_data_path('air_temp.pp')
-temperature = iris.load_cube(fname)
-
-# Take a 1d slice using array style indexing.
-temperature_1d = temperature[5, :]
-
-qplt.plot(temperature_1d)
-plt.show()
diff --git a/docs/iris/src/userguide/plotting_examples/1d_simple.py b/docs/iris/src/userguide/plotting_examples/1d_simple.py
deleted file mode 100644
index 8cb3f45643..0000000000
--- a/docs/iris/src/userguide/plotting_examples/1d_simple.py
+++ /dev/null
@@ -1,18 +0,0 @@
-
-from __future__ import (absolute_import, division, print_function)
-from six.moves import (filter, input, map, range, zip)  # noqa
-
-import matplotlib.pyplot as plt
-
-import iris
-import iris.plot as iplt
-
-
-fname = iris.sample_data_path('air_temp.pp')
-temperature = iris.load_cube(fname)
-
-# Take a 1d slice using array style indexing.
-temperature_1d = temperature[5, :]
-
-iplt.plot(temperature_1d)
-plt.show()
diff --git a/docs/iris/src/userguide/plotting_examples/1d_with_legend.py b/docs/iris/src/userguide/plotting_examples/1d_with_legend.py
deleted file mode 100644
index b0aee43c4a..0000000000
--- a/docs/iris/src/userguide/plotting_examples/1d_with_legend.py
+++ /dev/null
@@ -1,47 +0,0 @@
-
-from __future__ import (absolute_import, division, print_function)
-from six.moves import (filter, input, map, range, zip)  # noqa
-
-import matplotlib.pyplot as plt
-
-import iris
-import iris.plot as iplt
-
-
-fname = iris.sample_data_path('air_temp.pp')
-
-# Load exactly one cube from the given file
-temperature = iris.load_cube(fname)
-
-# We are only interested in a small number of latitudes (the 4 after and
-# including the 5th element), so index them out
-temperature = temperature[5:9, :]
-
-for cube in temperature.slices('longitude'):
-
-    # Create a string label to identify this cube (i.e. latitude: value)
-    cube_label = 'latitude: %s' % cube.coord('latitude').points[0]
-
-    # Plot the cube, and associate it with a label
-    iplt.plot(cube, label=cube_label)
-
-# Match the longitude range to global
-max_lon = temperature.coord('longitude').points.max()
-min_lon = temperature.coord('longitude').points.min()
-plt.xlim(min_lon, max_lon)
-
-# Add the legend with 2 columns
-plt.legend(ncol=2)
-
-# Put a grid on the plot
-plt.grid(True)
-
-# Provide some axis labels
-plt.ylabel('Temperature / kelvin')
-plt.xlabel('Longitude / degrees')
-
-# And a sensible title
-plt.suptitle('Air Temperature', fontsize=20, y=0.9)
-
-# Finally, show it.
-plt.show() diff --git a/docs/iris/src/userguide/plotting_examples/brewer.py b/docs/iris/src/userguide/plotting_examples/brewer.py deleted file mode 100644 index 2d61e276ab..0000000000 --- a/docs/iris/src/userguide/plotting_examples/brewer.py +++ /dev/null @@ -1,27 +0,0 @@ - -from __future__ import (absolute_import, division, print_function) -from six.moves import (filter, input, map, range, zip) # noqa - -import matplotlib.pyplot as plt -import numpy as np - -import iris.palette - - -a = np.linspace(0, 1, 256).reshape(1, -1) -a = np.vstack((a, a)) - -maps = sorted(iris.palette.CMAP_BREWER) -nmaps = len(maps) - -fig = plt.figure(figsize=(7, 10)) -fig.subplots_adjust(top=0.99, bottom=0.01, left=0.2, right=0.99) -for i, m in enumerate(maps): - ax = plt.subplot(nmaps, 1, i+1) - plt.axis("off") - plt.imshow(a, aspect='auto', cmap=plt.get_cmap(m), origin='lower') - pos = list(ax.get_position().bounds) - fig.text(pos[0] - 0.01, pos[1], m, fontsize=8, - horizontalalignment='right') - -plt.show() diff --git a/docs/iris/src/userguide/plotting_examples/cube_blockplot.py b/docs/iris/src/userguide/plotting_examples/cube_blockplot.py deleted file mode 100644 index a6273a274c..0000000000 --- a/docs/iris/src/userguide/plotting_examples/cube_blockplot.py +++ /dev/null @@ -1,19 +0,0 @@ - -from __future__ import (absolute_import, division, print_function) -from six.moves import (filter, input, map, range, zip) # noqa - -import matplotlib.pyplot as plt - -import iris -import iris.quickplot as qplt - - -# Load the data for a single value of model level number. -fname = iris.sample_data_path('hybrid_height.nc') -temperature_cube = iris.load_cube( - fname, iris.Constraint(model_level_number=1)) - -# Draw the block plot. -qplt.pcolormesh(temperature_cube) - -plt.show() diff --git a/docs/iris/src/userguide/plotting_examples/cube_brewer_cite_contourf.py b/docs/iris/src/userguide/plotting_examples/cube_brewer_cite_contourf.py deleted file mode 100644 index 9ba68fc47a..0000000000 --- a/docs/iris/src/userguide/plotting_examples/cube_brewer_cite_contourf.py +++ /dev/null @@ -1,29 +0,0 @@ - -from __future__ import (absolute_import, division, print_function) -from six.moves import (filter, input, map, range, zip) # noqa - -import matplotlib.pyplot as plt - -import iris -import iris.quickplot as qplt -import iris.plot as iplt - - -fname = iris.sample_data_path('air_temp.pp') -temperature_cube = iris.load_cube(fname) - -# Get the Purples "Brewer" palette. -brewer_cmap = plt.get_cmap('brewer_Purples_09') - -# Draw the contours, with n-levels set for the map colours (9). -# NOTE: needed as the map is non-interpolated, but matplotlib does not provide -# any special behaviour for these. -qplt.contourf(temperature_cube, brewer_cmap.N, cmap=brewer_cmap) - -# Add a citation to the plot. -iplt.citation(iris.plot.BREWER_CITE) - -# Add coastlines to the map created by contourf. 
-plt.gca().coastlines() - -plt.show() diff --git a/docs/iris/src/userguide/plotting_examples/cube_brewer_contourf.py b/docs/iris/src/userguide/plotting_examples/cube_brewer_contourf.py deleted file mode 100644 index aacf129c30..0000000000 --- a/docs/iris/src/userguide/plotting_examples/cube_brewer_contourf.py +++ /dev/null @@ -1,25 +0,0 @@ - -from __future__ import (absolute_import, division, print_function) -from six.moves import (filter, input, map, range, zip) # noqa - -import matplotlib.cm as mpl_cm -import matplotlib.pyplot as plt - -import iris -import iris.quickplot as qplt - -fname = iris.sample_data_path('air_temp.pp') -temperature_cube = iris.load_cube(fname) - -# Load a Cynthia Brewer palette. -brewer_cmap = mpl_cm.get_cmap('brewer_OrRd_09') - -# Draw the contours, with n-levels set for the map colours (9). -# NOTE: needed as the map is non-interpolated, but matplotlib does not provide -# any special behaviour for these. -qplt.contourf(temperature_cube, brewer_cmap.N, cmap=brewer_cmap) - -# Add coastlines to the map created by contourf. -plt.gca().coastlines() - -plt.show() diff --git a/docs/iris/src/userguide/plotting_examples/cube_contour.py b/docs/iris/src/userguide/plotting_examples/cube_contour.py deleted file mode 100644 index 0ec2fa7be4..0000000000 --- a/docs/iris/src/userguide/plotting_examples/cube_contour.py +++ /dev/null @@ -1,23 +0,0 @@ - -from __future__ import (absolute_import, division, print_function) -from six.moves import (filter, input, map, range, zip) # noqa - -import matplotlib.pyplot as plt - -import iris -import iris.quickplot as qplt - - -fname = iris.sample_data_path('air_temp.pp') -temperature_cube = iris.load_cube(fname) - -# Add a contour, and put the result in a variable called contour. -contour = qplt.contour(temperature_cube) - -# Add coastlines to the map created by contour. -plt.gca().coastlines() - -# Add contour labels based on the contour we have just created. -plt.clabel(contour, inline=False) - -plt.show() diff --git a/docs/iris/src/userguide/plotting_examples/cube_contourf.py b/docs/iris/src/userguide/plotting_examples/cube_contourf.py deleted file mode 100644 index 21ebb1a214..0000000000 --- a/docs/iris/src/userguide/plotting_examples/cube_contourf.py +++ /dev/null @@ -1,20 +0,0 @@ - -from __future__ import (absolute_import, division, print_function) -from six.moves import (filter, input, map, range, zip) # noqa - -import matplotlib.pyplot as plt - -import iris -import iris.quickplot as qplt - - -fname = iris.sample_data_path('air_temp.pp') -temperature_cube = iris.load_cube(fname) - -# Draw the contour with 25 levels. -qplt.contourf(temperature_cube, 25) - -# Add coastlines to the map created by contourf. -plt.gca().coastlines() - -plt.show() diff --git a/docs/iris/src/userguide/real_and_lazy_data.rst b/docs/iris/src/userguide/real_and_lazy_data.rst deleted file mode 100644 index 84a35efa64..0000000000 --- a/docs/iris/src/userguide/real_and_lazy_data.rst +++ /dev/null @@ -1,235 +0,0 @@ - -.. _real_and_lazy_data: - - -.. testsetup:: * - - import dask.array as da - import iris - import numpy as np - - -================== -Real and Lazy Data -================== - -We have seen in the :doc:`iris_cubes` section of the user guide that -Iris cubes contain data and metadata about a phenomenon. The data element of a cube -is always an array, but the array may be either "real" or "lazy". - -In this section of the user guide we will look specifically at the concepts of -real and lazy data as they apply to the cube and other data structures in Iris. 
- - -What is real and lazy data? ---------------------------- - -In Iris, we use the term **real data** to describe data arrays that are loaded -into memory. Real data is typically provided as a -`NumPy array `_, -which has a shape and data type that are used to describe the array's data points. -Each data point takes up a small amount of memory, which means large NumPy arrays can -take up a large amount of memory. - -Conversely, we use the term **lazy data** to describe data that is not loaded into memory. -(This is sometimes also referred to as **deferred data**.) -In Iris, lazy data is provided as a -`dask array `_. -A dask array also has a shape and data type -but the dask array's data points remain on disk and only loaded into memory in -small chunks when absolutely necessary. This has key performance benefits for -handling large amounts of data, where both calculation time and storage -requirements can be significantly reduced. - -In Iris, when actual data values are needed from a lazy data array, it is -*'realised'* : this means that all the actual values are read in from the file, -and a 'real' -(i.e. `numpy `_) -array replaces the lazy array within the Iris object. - -Following realisation, the Iris object just contains the actual ('real') -data, so the time cost of reading all the data is not incurred again. -From here on, access to the data is fast, but it now occupies its full memory space. - -In particular, any direct reference to a `cube.data` will realise the cube data -content : any lazy content is lost as the data is read from file, and the cube -content is replaced with a real array. -This is also referred to simply as "touching" the data. - -See the section :ref:`when_real_data` -for more examples of this. - -You can check whether a cube has real data or lazy data by using the method -:meth:`~iris.cube.Cube.has_lazy_data`. For example:: - - >>> cube = iris.load_cube(iris.sample_data_path('air_temp.pp')) - >>> cube.has_lazy_data() - True - # Realise the lazy data. - >>> cube.data - >>> cube.has_lazy_data() - False - - -Benefits --------- - -The primary advantage of using lazy data is that it enables -`out-of-core processing `_; -that is, the loading and manipulating of datasets without loading the full data into memory. - -There are two key benefits from this : - -**Firstly**, the result of a calculation on a large dataset often occupies much -less storage space than the source data -- such as for instance a maximum data -value calculated over a large number of datafiles. -In these cases the result can be computed in sections, without ever requiring the -entire source dataset to be loaded, thus drastically reducing memory footprint. -This strategy of task division can also enable reduced execution time through the effective -use of parallel processing capabilities. - -**Secondly**, it is often simply convenient to form a calculation on a large -dataset, of which only a certain portion is required at any one time --- for example, plotting individual timesteps from a large sequence. -In such cases, a required portion can be extracted and realised without calculating the entire result. - -.. _when_real_data: - -When does my data become real? ------------------------------- - -Certain operations, such as cube indexing and statistics, can be -performed in a lazy fashion, producing a 'lazy' result from a lazy input, so -that no realisation immediately occurs. -However other operations, such as plotting or printing data values, will always -trigger the 'realisation' of data. 
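-
-For example, a short sketch of both behaviours (indexing is one of the lazy
-operations mentioned above, and 'touching' ``cube.data`` is a realising one)::
-
-    >>> cube = iris.load_cube(iris.sample_data_path('air_temp.pp'))
-    >>> cube.has_lazy_data()
-    True
-    # Indexing returns a new cube whose data is still lazy.
-    >>> subcube = cube[:10, :10]
-    >>> subcube.has_lazy_data()
-    True
-    # Realise the lazy data by 'touching' it.
-    >>> subcube.data
-    >>> subcube.has_lazy_data()
-    False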
- -When you load a dataset using Iris the data array will almost always initially be -a lazy array. This section details some operations that will realise lazy data -as well as some operations that will maintain lazy data. We use the term **realise** -to mean converting lazy data into real data. - -Most operations on data arrays can be run equivalently on both real and lazy data. -If the data array is real then the operation will be run on the data array -immediately. The results of the operation will be available as soon as processing is completed. -If the data array is lazy then the operation will be deferred and the data array will -remain lazy until you request the result (such as when you read from ``cube.data``):: - - >>> cube = iris.load_cube(iris.sample_data_path('air_temp.pp')) - >>> cube.has_lazy_data() - True - >>> cube += 5 - >>> cube.has_lazy_data() - True - -The process by which the operation is deferred until the result is requested is -referred to as **lazy evaluation**. - -Certain operations, including regridding and plotting, can only be run on real data. -Calling such operations on lazy data will automatically realise your lazy data. - -You can also realise (and so load into memory) your cube's lazy data if you 'touch' the data. -To 'touch' the data means directly accessing the data by calling ``cube.data``, -as in the previous example. - -Core data -^^^^^^^^^ - -Cubes have the concept of "core data". This returns the cube's data in its -current state: - - * If a cube has lazy data, calling the cube's :meth:`~iris.cube.Cube.core_data` method - will return the cube's lazy dask array. Calling the cube's - :meth:`~iris.cube.Cube.core_data` method **will never realise** the cube's data. - * If a cube has real data, calling the cube's :meth:`~iris.cube.Cube.core_data` method - will return the cube's real NumPy array. - -For example:: - - >>> cube = iris.load_cube(iris.sample_data_path('air_temp.pp')) - >>> cube.has_lazy_data() - True - - >>> the_data = cube.core_data() - >>> type(the_data) - <class 'dask.array.core.Array'> - >>> cube.has_lazy_data() - True - - # Realise the lazy data. - >>> cube.data - >>> the_data = cube.core_data() - >>> type(the_data) - <class 'numpy.ndarray'> - >>> cube.has_lazy_data() - False - - -Coordinates ----------- - -In the same way that Iris cubes contain a data array, Iris coordinates contain a -points array and an optional bounds array. -Coordinate points and bounds arrays can also be real or lazy: - - * A :class:`~iris.coords.DimCoord` will only ever have **real** points and bounds - arrays because of monotonicity checks that realise lazy arrays. - * An :class:`~iris.coords.AuxCoord` can have **real or lazy** points and bounds. - * An :class:`~iris.aux_factory.AuxCoordFactory` (or derived coordinate) - can have **real or lazy** points and bounds. If all of the - :class:`~iris.coords.AuxCoord` instances used to construct the derived coordinate - have real points and bounds then the derived coordinate will have real points - and bounds, otherwise the derived coordinate will have lazy points and bounds. - -Iris cubes and coordinates have very similar interfaces, which extends to accessing -coordinates' lazy points and bounds: - -..
doctest:: - - >>> cube = iris.load_cube(iris.sample_data_path('hybrid_height.nc'), 'air_potential_temperature') - - >>> dim_coord = cube.coord('model_level_number') - >>> print(dim_coord.has_lazy_points()) - False - >>> print(dim_coord.has_bounds()) - False - >>> print(dim_coord.has_lazy_bounds()) - False - - >>> aux_coord = cube.coord('sigma') - >>> print(aux_coord.has_lazy_points()) - True - >>> print(aux_coord.has_bounds()) - True - >>> print(aux_coord.has_lazy_bounds()) - True - - # Realise the lazy points. This will **not** realise the lazy bounds. - >>> points = aux_coord.points - >>> print(aux_coord.has_lazy_points()) - False - >>> print(aux_coord.has_lazy_bounds()) - True - - >>> derived_coord = cube.coord('altitude') - >>> print(derived_coord.has_lazy_points()) - True - >>> print(derived_coord.has_bounds()) - True - >>> print(derived_coord.has_lazy_bounds()) - True - -.. note:: - Printing a lazy :class:`~iris.coords.AuxCoord` will realise its points and bounds arrays! - - -Dask processing options ------------------------ - -Iris uses dask to provide lazy data arrays for both Iris cubes and coordinates, -and for computing deferred operations on lazy arrays. - -Dask provides processing options to control how deferred operations on lazy arrays -are computed. This is provided via the ``dask.set_options`` interface. See the -`dask documentation `_ -for more information on setting dask processing options. diff --git a/docs/iris/src/userguide/regridding_plots/interpolate_column.py b/docs/iris/src/userguide/regridding_plots/interpolate_column.py deleted file mode 100644 index 201dcda4a1..0000000000 --- a/docs/iris/src/userguide/regridding_plots/interpolate_column.py +++ /dev/null @@ -1,63 +0,0 @@ - -from __future__ import (absolute_import, division, print_function) -from six.moves import (filter, input, map, range, zip) # noqa - -import iris -import iris.quickplot as qplt -import iris.analysis -import matplotlib.pyplot as plt -import numpy as np - - -fname = iris.sample_data_path('hybrid_height.nc') -column = iris.load_cube(fname, 'air_potential_temperature')[:, 0, 0] - -alt_coord = column.coord('altitude') - -# Interpolate the "perfect" linear interpolation. Really this is just -# a high number of interpolation points, in this case 1000 of them. -altitude_points = [('altitude', np.linspace(400, 1250, 1000))] -scheme = iris.analysis.Linear(extrapolation_mode='mask') -linear_column = column.interpolate(altitude_points, scheme) - -# Now interpolate the data onto 10 evenly spaced altitude levels, -# as we did in the example. -altitude_points = [('altitude', np.linspace(400, 1250, 10))] -scheme = iris.analysis.Linear() -new_column = column.interpolate(altitude_points, scheme) - -plt.figure(figsize=(5, 4), dpi=100) - -# Plot the black markers for the original data. -qplt.plot(column, column.coord('altitude'), - marker='o', color='black', linestyle='', markersize=3, - label='Original values', zorder=2) - -# Plot the gray line to display the linear interpolation. -qplt.plot(linear_column, linear_column.coord('altitude'), - color='gray', - label='Linear interpolation', zorder=0) - -# Plot the red markers for the new data. -qplt.plot(new_column, new_column.coord('altitude'), - marker='D', color='red', linestyle='', - label='Interpolated values', zorder=1) - -ax = plt.gca() -# Space the plot such that the labels appear correctly. -plt.subplots_adjust(left=0.17, bottom=0.14) - -# Limit the plot to a maximum of 5 ticks. 
-ax.xaxis.get_major_locator().set_params(nbins=5) - -# Prevent matplotlib from using "offset" notation on the xaxis. -ax.xaxis.get_major_formatter().set_useOffset(False) - -# Put some space between the line and the axes. -ax.margins(0.05) - -# Place gridlines and a legend. -ax.grid() -plt.legend(loc='lower right') - -plt.show() diff --git a/docs/iris/src/userguide/regridding_plots/regridded_to_global.py b/docs/iris/src/userguide/regridding_plots/regridded_to_global.py deleted file mode 100644 index c392de7a52..0000000000 --- a/docs/iris/src/userguide/regridding_plots/regridded_to_global.py +++ /dev/null @@ -1,26 +0,0 @@ - -from __future__ import (absolute_import, division, print_function) -from six.moves import (filter, input, map, range, zip) # noqa - -import iris -import iris.analysis -import iris.plot as iplt -import matplotlib.pyplot as plt - - -global_air_temp = iris.load_cube(iris.sample_data_path('air_temp.pp')) -rotated_psl = iris.load_cube(iris.sample_data_path('rotated_pole.nc')) - -scheme = iris.analysis.Linear(extrapolation_mode='mask') -global_psl = rotated_psl.regrid(global_air_temp, scheme) - -plt.figure(figsize=(4, 3)) -iplt.pcolormesh(global_psl) -plt.title('Air pressure\n' - 'on a global longitude latitude grid') -ax = plt.gca() -ax.coastlines() -ax.gridlines() -ax.set_extent([-90, 70, 10, 80]) - -plt.show() diff --git a/docs/iris/src/userguide/regridding_plots/regridded_to_global_area_weighted.py b/docs/iris/src/userguide/regridding_plots/regridded_to_global_area_weighted.py deleted file mode 100644 index 067a4ee2d6..0000000000 --- a/docs/iris/src/userguide/regridding_plots/regridded_to_global_area_weighted.py +++ /dev/null @@ -1,51 +0,0 @@ - -from __future__ import (absolute_import, division, print_function) -from six.moves import (filter, input, map, range, zip) # noqa - -import iris -import iris.analysis -import iris.plot as iplt -import matplotlib.pyplot as plt -import matplotlib.colors -import numpy as np - -global_air_temp = iris.load_cube(iris.sample_data_path('air_temp.pp')) - -regional_ash = iris.load_cube(iris.sample_data_path('NAME_output.txt')) -regional_ash = regional_ash.collapsed('flight_level', iris.analysis.SUM) - -# Mask values so low that they are anomalous. -regional_ash.data = np.ma.masked_less(regional_ash.data, 5e-6) - -norm = matplotlib.colors.LogNorm(5e-6, 0.0175) - -global_air_temp.coord('longitude').guess_bounds() -global_air_temp.coord('latitude').guess_bounds() - -fig = plt.figure(figsize=(8, 4.5)) - -plt.subplot(2, 2, 1) -iplt.pcolormesh(regional_ash, norm=norm) -plt.title('Volcanic ash total\nconcentration not regridded', - size='medium') - -for subplot_num, mdtol in zip([2, 3, 4], [0, 0.5, 1]): - plt.subplot(2, 2, subplot_num) - scheme = iris.analysis.AreaWeighted(mdtol=mdtol) - global_ash = regional_ash.regrid(global_air_temp, scheme) - iplt.pcolormesh(global_ash, norm=norm) - plt.title('Volcanic ash total concentration\n' - 'regridded with AreaWeighted(mdtol={})'.format(mdtol), - size='medium') - -plt.subplots_adjust(hspace=0, wspace=0.05, - left=0.001, right=0.999, bottom=0, top=0.955) - -# Iterate over each of the figure's axes, adding coastlines, gridlines -# and setting the extent. 
-for ax in fig.axes: - ax.coastlines('50m') - ax.gridlines() - ax.set_extent([-80, 40, 31, 75]) - -plt.show() diff --git a/docs/iris/src/userguide/regridding_plots/regridded_to_rotated.py b/docs/iris/src/userguide/regridding_plots/regridded_to_rotated.py deleted file mode 100644 index f2c2b271df..0000000000 --- a/docs/iris/src/userguide/regridding_plots/regridded_to_rotated.py +++ /dev/null @@ -1,25 +0,0 @@ - -from __future__ import (absolute_import, division, print_function) -from six.moves import (filter, input, map, range, zip) # noqa - -import iris -import iris.analysis -import iris.plot as iplt -import matplotlib.pyplot as plt - -global_air_temp = iris.load_cube(iris.sample_data_path('air_temp.pp')) -rotated_psl = iris.load_cube(iris.sample_data_path('rotated_pole.nc')) - -rotated_air_temp = global_air_temp.regrid(rotated_psl, iris.analysis.Linear()) - - -plt.figure(figsize=(4, 3)) - -iplt.pcolormesh(rotated_air_temp, norm=plt.Normalize(260, 300)) -plt.title('Air temperature\n' - 'on a limited area rotated pole grid') -ax = plt.gca() -ax.coastlines(resolution='50m') -ax.gridlines() - -plt.show() diff --git a/docs/iris/src/userguide/regridding_plots/regridding_plot.py b/docs/iris/src/userguide/regridding_plots/regridding_plot.py deleted file mode 100644 index 2419d47727..0000000000 --- a/docs/iris/src/userguide/regridding_plots/regridding_plot.py +++ /dev/null @@ -1,33 +0,0 @@ - -from __future__ import (absolute_import, division, print_function) -from six.moves import (filter, input, map, range, zip) # noqa - -import iris -import iris.plot as iplt -import matplotlib.pyplot as plt - -# Load the data. -global_air_temp = iris.load_cube(iris.sample_data_path('air_temp.pp')) -rotated_psl = iris.load_cube(iris.sample_data_path('rotated_pole.nc')) - -plt.figure(figsize=(9, 3.5)) - -plt.subplot(1, 2, 1) -iplt.pcolormesh(global_air_temp, norm=plt.Normalize(260, 300)) -plt.title('Air temperature\n' - 'on a global longitude latitude grid') -ax = plt.gca() -ax.coastlines() -ax.gridlines() - -plt.subplot(1, 2, 2) -iplt.pcolormesh(rotated_psl) -plt.title('Air pressure\n' - 'on a limited area rotated pole grid') -ax = plt.gca() -ax.coastlines(resolution='50m') -ax.gridlines() - -plt.tight_layout() - -plt.show() diff --git a/docs/iris/src/userguide/subsetting_a_cube.rst b/docs/iris/src/userguide/subsetting_a_cube.rst deleted file mode 100644 index b61f16a043..0000000000 --- a/docs/iris/src/userguide/subsetting_a_cube.rst +++ /dev/null @@ -1,210 +0,0 @@ -.. _subsetting_a_cube: - -================= -Subsetting a Cube -================= - -The :doc:`loading_iris_cubes` section of the user guide showed how to load data into multidimensional Iris cubes. -However it is often necessary to reduce the dimensionality of a cube down to something more appropriate and/or manageable. - -Iris provides several ways of reducing both the amount of data and/or the number of dimensions in your cube depending on the circumstance. -In all cases **the subset of a valid cube is itself a valid cube**. 
- - -Cube extraction -^^^^^^^^^^^^^^^^ -A subset of a cube can be "extracted" from a multi-dimensional cube in order to reduce its dimensionality: - - >>> import iris - >>> filename = iris.sample_data_path('space_weather.nc') - >>> cube = iris.load_cube(filename, 'electron density') - >>> equator_slice = cube.extract(iris.Constraint(grid_latitude=0)) - >>> print(equator_slice) - electron density / (1E11 e/m^3) (height: 29; grid_longitude: 31) - Dimension coordinates: - height x - - grid_longitude - x - Auxiliary coordinates: - latitude - x - longitude - x - Scalar coordinates: - grid_latitude: 0.0 degrees - Attributes: - Conventions: CF-1.5 - - -In this example we start with a 3 dimensional cube, with dimensions of ``height``, ``grid_latitude`` and ``grid_longitude``, -and extract every point where the latitude is 0, resulting in a 2d cube with axes of ``height`` and ``grid_longitude``. - - -.. _floating-point-warning: -.. warning:: - - Caution is required when using equality constraints with floating point coordinates such as ``grid_latitude``. - Printing the points of a coordinate does not necessarily show the full precision of the underlying number and it - is very easy to return no matches to a constraint when one was expected. - This can be avoided by using a function as the argument to the constraint:: - - def near_zero(cell): - """Returns true if the cell is between -0.1 and 0.1.""" - return -0.1 < cell < 0.1 - - equator_constraint = iris.Constraint(grid_latitude=near_zero) - - Often you will see this construct in shorthand using a lambda function definition:: - - equator_constraint = iris.Constraint(grid_latitude=lambda cell: -0.1 < cell < 0.1) - - -The extract method could be applied again to the *equator_slice* cube to get a further subset. - -For example, to get a ``height`` of 9000 metres at the equator the following line extends the previous example:: - - equator_height_9km_slice = equator_slice.extract(iris.Constraint(height=9000)) - print(equator_height_9km_slice) - -The two steps required to get ``height`` of 9000 m at the equator can be simplified into a single constraint:: - - equator_height_9km_slice = cube.extract(iris.Constraint(grid_latitude=0, height=9000)) - print(equator_height_9km_slice) - -As we saw in :doc:`loading_iris_cubes` the result of :func:`iris.load` is a :class:`CubeList `.
-The ``extract`` method also exists on a :class:`CubeList ` and behaves in exactly the -same way as loading with constraints: - - >>> import iris - >>> air_temp_and_fp_6 = iris.Constraint('air_potential_temperature', forecast_period=6) - >>> level_10 = iris.Constraint(model_level_number=10) - >>> filename = iris.sample_data_path('uk_hires.pp') - >>> cubes = iris.load(filename).extract(air_temp_and_fp_6 & level_10) - >>> print(cubes) - 0: air_potential_temperature / (K) (grid_latitude: 204; grid_longitude: 187) - >>> print(cubes[0]) - air_potential_temperature / (K) (grid_latitude: 204; grid_longitude: 187) - Dimension coordinates: - grid_latitude x - - grid_longitude - x - Auxiliary coordinates: - surface_altitude x x - Derived coordinates: - altitude x x - Scalar coordinates: - forecast_period: 6.0 hours - forecast_reference_time: 2009-11-19 04:00:00 - level_height: 395.0 m, bound=(360.0, 433.3332) m - model_level_number: 10 - sigma: 0.9549927, bound=(0.9589389, 0.95068014) - time: 2009-11-19 10:00:00 - Attributes: - STASH: m01s00i004 - source: Data from Met Office Unified Model - um_version: 7.3 - - -Cube iteration -^^^^^^^^^^^^^^^ -A useful way of dealing with a Cube in its **entirety** is by iterating over its layers or slices. -For example, to deal with a 3 dimensional cube (z,y,x) you could iterate over all 2 dimensional slices in y and x -which make up the full 3d cube:: - - import iris - filename = iris.sample_data_path('hybrid_height.nc') - cube = iris.load_cube(filename) - print(cube) - for yx_slice in cube.slices(['grid_latitude', 'grid_longitude']): - print(repr(yx_slice)) - -As the original cube had the shape (15, 100, 100) there were 15 latitude longitude slices and hence the -line ``print(repr(yx_slice))`` was run 15 times. - -.. note:: - - The order of latitude and longitude in the list is important; had they been swapped the resultant cube slices - would have been transposed. - - For further information see :py:meth:`Cube.slices `. - - -This method can handle n-dimensional slices by providing more or fewer coordinate names in the list to **slices**:: - - import iris - filename = iris.sample_data_path('hybrid_height.nc') - cube = iris.load_cube(filename) - print(cube) - for i, x_slice in enumerate(cube.slices(['grid_longitude'])): - print(i, repr(x_slice)) - -The Python function :py:func:`enumerate` is used in this example to provide an incrementing variable **i** which is -printed with the summary of each cube slice. Note that there were 1500 1d longitude cubes as a result of -slicing the 3 dimensional cube (15, 100, 100) by longitude (i starts at 0 and 1500 = 15 * 100). - -.. hint:: - It is often useful to get a single 2d slice from a multidimensional cube in order to develop a 2d plot function, for example. - This can be achieved by using the ``next()`` function on the result of - slices:: - - first_slice = next(cube.slices(['grid_latitude', 'grid_longitude'])) - - Once your code can handle a 2d slice, it is then an easy step to loop over **all** 2d slices within the bigger - cube using the slices method. - - -Cube indexing -^^^^^^^^^^^^^ -In the same way that you would expect a numeric multidimensional array to be **indexed** to take a subset of your -original array, you can **index** a Cube for the same purpose.
- - -Here are some examples of array indexing in :py:mod:`numpy`:: - - import numpy as np - # create an array of 12 consecutive integers starting from 0 - a = np.arange(12) - print(a) - - print(a[0]) # first element of the array - - print(a[-1]) # last element of the array - - print(a[0:4]) # first four elements of the array (the same as a[:4]) - - print(a[-4:]) # last four elements of the array - - print(a[::-1]) # gives all of the array, but backwards - - # Make a 2d array by reshaping a - b = a.reshape(3, 4) - print(b) - - print(b[0, 0]) # first element of the first and second dimensions - - print(b[0]) # first element of the first dimension (+ every other dimension) - - # get the second element of the first dimension and all of the second dimension - # in reverse, by steps of two. - print(b[1, ::-2]) - - -Similarly, Iris cubes have indexing capability:: - - import iris - filename = iris.sample_data_path('hybrid_height.nc') - cube = iris.load_cube(filename) - - print(cube) - - # get the first element of the first dimension (+ every other dimension) - print(cube[0]) - - # get the last element of the first dimension (+ every other dimension) - print(cube[-1]) - - # get the first 4 elements of the first dimension (+ every other dimension) - print(cube[0:4]) - - # Get the first element of the first and third dimension (+ every other dimension) - print(cube[0, :, 0]) - - # Get the second element of the first dimension and all of the second dimension - # in reverse, by steps of two. - print(cube[1, ::-2]) diff --git a/docs/iris/src/whatsnew/1.10.rst b/docs/iris/src/whatsnew/1.10.rst deleted file mode 100644 index 26f21c0252..0000000000 --- a/docs/iris/src/whatsnew/1.10.rst +++ /dev/null @@ -1,176 +0,0 @@ -What's New in Iris 1.10 -*********************** - -:Release: 1.10 -:Date: 5th September 2016 - -This document explains the new/changed features of Iris in version 1.10 -(:doc:`View all changes `.) - -Iris 1.10 Features -================== -.. _iris_grib_added: - -* Support has now been added for the - `iris_grib `_ package, which - provides GRIB format support in an optional package, separate from Iris. - - * If ``iris_grib`` is available, it will always be used in place of the older - iris module :mod:`iris.fileformats.grib`. - - * The capabilities of ``iris_grib`` are essentially the same as the existing - :mod:`iris.fileformats.grib` when used with ``iris.FUTURE.strict_grib_load=True``, - with only small detail differences. - - * The old :mod:`iris.fileformats.grib` module is now deprecated and may shortly be - removed. - - * If you are already using the recommended :data:`iris.FUTURE` setting - ``iris.FUTURE.strict_grib_load=True`` this should not cause problems, as - the new package is all-but identical. - - * However, the option ``iris.FUTURE.strict_grib_load`` is itself now - deprecated, so you should remove code that sets it. - - * If, however, your code is still using the older "non-strict" grib - loading, then you may need to make code changes. - - * In particular, the ``field`` object passed to load callbacks is - different. - See :class:`iris.fileformats.grib.message.GribMessage` (the - ``iris_grib.message.GribMessage`` class is the same as this, for now). - - * Please exercise your code with the new iris_grib module, and let us know of - any problems you uncover, such as files that will no longer load with the - new implementation. - -* :meth:`iris.experimental.regrid.PointInCell.regridder` now works across coordinate systems, including non latlon systems. 
Additionally, the requirement that the source data X and Y coordinates be 2D has been removed. NB: some aspects of this change are backwards incompatible. -* Plotting non-Gregorian calendars is now supported. This adds `nc_time_axis `_ as a dependency. -* Promoting a scalar coordinate to a dimension coordinate with :func:`iris.util.new_axis` no longer loads deferred data. -* The parsing functionality for Cell Methods from netCDF files is available as part of the :mod:`iris.fileformats.netcdf` module as :func:`iris.fileformats.netcdf.parse_cell_methods`. -* Support for the NameIII Version 2 file format has been added. -* Loading netcdf data in Mercator and Stereographic projections now accepts optional extra projection parameter attributes (``false_easting``, ``false_northing`` and ``scale_factor_at_projection_origin``), if they match the default values. - - * NetCDF files which define a Mercator projection where the ``false_easting``, ``false_northing`` and ``scale_factor_at_projection_origin`` match the defaults will have the projection loaded correctly. Otherwise, a warning will be issued for each parameter that does not match the default and the projection will not be loaded. - * NetCDF files which define a Stereographic projection where the ``scale_factor_at_projection_origin`` is equal to 1.0 will have the projection loaded correctly. Otherwise, a warning will be issued and the projection will not be loaded. - -* The :mod:`iris.plot` routines :func:`~iris.plot.contour`, :func:`~iris.plot.contourf`, :func:`~iris.plot.outline`, :func:`~iris.plot.pcolor`, :func:`~iris.plot.pcolormesh` and :func:`~iris.plot.points` now support plotting cubes with anonymous dimensions by specifying the *numeric index* of the anonymous dimension within the ``coords`` keyword argument. - - Note that the axis of the anonymous dimension will be plotted in index space. - -* NetCDF loading and saving now supports Cubes that use the LambertConformal coordinate system. -* The experimental structured Fieldsfile loader :func:`~iris.experimental.fieldsfile.load` has been extended to also load structured PP files. - - Structured loading is a streamlined operation, offering the benefit of a significantly faster loading alternative to the more generic :func:`iris.load` mechanism. - - Note that structured loading is not an optimised wholesale replacement of :func:`iris.load`. Structured loading is restricted to input containing contiguously ordered fields for each phenomenon that repeat regularly over the same vertical levels and times. For further details, see :func:`~iris.experimental.fieldsfile.load`. - -* :mod:`iris.experimental.regrid_conservative` is now compatible with ESMPy v7. -* Saving zonal (i.e. longitudinal) means to PP files now sets the '64s' bit in LBPROC. -* Loading of 'little-endian' PP files is now supported. -* All appropriate :mod:`iris.plot` functions now handle an ``axes`` keyword, allowing use of the object oriented matplotlib interface rather than pyplot. -* The ability to pass file format object lists into the rules based load pipeline, as used for GRIB, Fields Files and PP has been added. The :func:`iris.fileformats.pp.load_pairs_from_fields` and :func:`iris.fileformats.grib.load_pairs_from_fields` are provided to produce cubes from such lists. These lists may have been filtered or altered using the appropriate :mod:`iris.fileformats` modules. -* Cubes can now have an 'hour' coordinate added with :meth:`iris.coord_categorisation.add_hour`.
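-
-  As a minimal usage sketch (``cube`` here is assumed to be any cube with a
-  ``time`` coordinate)::
-
-      import iris.coord_categorisation
-
-      # Add a categorised 'hour' auxiliary coordinate derived from 'time'.
-      iris.coord_categorisation.add_hour(cube, 'time', name='hour')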
-* Time coordinates from PP fields with an lbcode of the form 3xx23 are now correctly encoded with a 360-day calendar. -* The loading from and saving to netCDF of CF cell_measure variables is supported, along with their representation within a Cube as :attr:`~iris.cube.Cube.cell_measures`. -* Cubes with anonymous dimensions can now be concatenated. This can only occur along a dimension that is not anonymous. -* NetCDF saving of ``valid_range``, ``valid_min`` and ``valid_max`` cube attributes is now allowed. - -Bugs Fixed -========== -* Altered Cell Methods to display coordinate's standard_name rather than var_name where appropriate to avoid human confusion. -* Saving multiple cubes with netCDF4 protected attributes should now work as expected. -* Concatenating cubes with singleton dimensions (dimensions of size one) now works properly. -* Fixed the ``grid_mapping_name`` and ``secant_latitudes`` handling for the LambertConformal coordinate system. -* Fixed bug in :func:`iris.analysis.cartography.project` where the output projection coordinates didn't have units. -* Attempting to use :meth:`iris.sample_data_path` to access a file that isn't actually Iris sample data now raises a more descriptive error. A note about the appropriate use of `sample_data_path` has also been added to the documentation. -* Fixed a bug where regridding or interpolation with the :class:`~iris.analysis.Nearest` scheme returned floating-point results even when the source data was integer typed. It now always returns the same type as the source data. -* Fixed a bug where regridding circular data would ignore any source masking. This affected any regridding using the :class:`~iris.analysis.Linear` and :class:`~iris.analysis.Nearest` schemes, and also :func:`iris.analysis.interpolate.linear`. -* The ``coord_name`` parameter to :func:`~iris.fileformats.rules.scalar_cell_method` is now checked correctly. -* LBPROC is set correctly when a cube containing the minimum of a variable is saved to a PP file. The IA component of LBTIM is set correctly when saving maximum or minimum values. -* The performance of :meth:`iris.cube.Cube.extract` when a list of values is given to an instance of :class:`iris.Constraint` has been improved considerably. -* Fixed a bug with :meth:`iris.cube.Cube.data` where an :class:`numpy.ndarray` was not being returned for scalar cubes with lazy data. -* When saving in netcdf format, the units of 'latitude' and 'longitude' coordinates specified in 'degrees' are saved as 'degrees_north' and 'degrees_east' respectively, as defined in the CF conventions for netCDF files: sections 4.1 and 4.2. -* Fixed a bug with a class of pp files with lbyr == 0, where the date would cause errors when converting to a datetime object (e.g. when printing a cube). - - When processing a pp field with lbtim = 2x, lbyr == lbyrd == 0 and lbmon == lbmond, 'month' and 'month_number' coordinates are created instead of 'time'. - -* Fixed a bug in :meth:`~iris.analysis.calculus.curl` where the sign of the r-component for spherical coordinates was opposite to what was expected. -* A bug that prevented cube printing in some cases has been fixed. -* Fixed a bug where a deepcopy of a :class:`~iris.coords.DimCoord` would have writable ``points`` and ``bounds`` arrays. These arrays can now no longer be modified in-place. -* Concatenation no longer occurs when the auxiliary coordinates of the cubes do not match. This check is not applied to AuxCoords that span the dimension the concatenation is occurring along.
This behaviour can be switched off by setting the ``check_aux_coords`` kwarg in :meth:`iris.cube.CubeList.concatenate` to False. -* Fixed a bug in :meth:`iris.cube.Cube.subset` where an exception would be thrown while trying to subset over a non-dimensional scalar coordinate. - -Incompatible Changes -==================== -* The source and target for :meth:`iris.experimental.regrid.PointInCell.regridder` must now have defined coordinate systems (i.e. not ``None``). Additionally, the source data X and Y coordinates must have the same cube dimensions. - -Deprecations -============ -* Deprecated the :class:`iris.Future` option - ``iris.FUTURE.strict_grib_load``. - This only affected the module :mod:`iris.fileformats.grib`, which is itself - now deprecated. - Please see :ref:`iris_grib package `, above. -* Deprecated the module :mod:`iris.fileformats.grib`. The new package - `iris_grib `_ replaces this - functionality, which will shortly be removed. - Please see :ref:`iris_grib package `, above. -* The use of :data:`iris.config.SAMPLE_DATA_DIR` has been deprecated and replaced by the now importable `iris_sample_data `_ package. - -* Deprecated the module :mod:`iris.analysis.interpolate`. - This contains the following public items, all of which are now deprecated and - will be removed in a future release: - - * :func:`~iris.analysis.interpolate.linear` - * :func:`~iris.analysis.interpolate.regrid` - * :func:`~iris.analysis.interpolate.regrid_to_max_resolution` - * :func:`~iris.analysis.interpolate.nearest_neighbour_indices` - * :func:`~iris.analysis.interpolate.nearest_neighbour_data_value` - * :func:`~iris.analysis.interpolate.extract_nearest_neighbour` - * class :class:`~iris.analysis.interpolate.Linear1dExtrapolator`. - - Please use the replacement facilities individually noted in the module - documentation for :mod:`iris.analysis.interpolate`. -* The method :meth:`iris.cube.Cube.regridded` has been deprecated. - Please use :meth:`iris.cube.Cube.regrid` instead (see - :meth:`~iris.cube.Cube.regridded` for details). -* Deprecated :data:`iris.fileformats.grib.hindcast_workaround` and :class:`iris.fileformats.grib.GribWrapper`. The class :class:`iris.fileformats.grib.message.GribMessage` provides alternative means of working with GRIB message instances. -* Deprecated the module :mod:`iris.fileformats.ff`. Please use the replacement - facilities in module :mod:`iris.fileformats.um`: - - * :func:`iris.fileformats.um.um_to_pp` replaces :class:`iris.fileformats.ff.FF2PP`. - * :func:`iris.fileformats.um.load_cubes` replaces :func:`iris.fileformats.ff.load_cubes`. - * :func:`iris.fileformats.um.load_cubes_32bit_ieee` replaces :func:`iris.fileformats.ff.load_cubes_32bit_ieee`. - - All other public components are generally deprecated and will be removed in a future release. - -* The :func:`iris.fileformats.pp.as_pairs` and :func:`iris.fileformats.grib.as_pairs` are deprecated. These are replaced with :func:`iris.fileformats.pp.save_pairs_from_cube` and :func:`iris.fileformats.grib.save_pairs_from_cube`. -* ``iris.fileformats.pp_packing`` has been deprecated. Please install the separate `mo_pack `_ package instead. This provides the same functionality. -* Deprecated logging functions (currently used only for rules logging): - :data:`iris.config.RULE_LOG_DIR`, - :data:`iris.config.RULE_LOG_IGNORE` and - :data:`iris.fileformats.rules.log`.
- -* Deprecated all the remaining text rules mechanisms: - :class:`iris.fileformats.rules.DebugString`, - :class:`iris.fileformats.rules.CMAttribute`, - :class:`iris.fileformats.rules.CMCustomAttribute`, - :class:`iris.fileformats.rules.CoordAndDims`, - :class:`iris.fileformats.rules.Rule`, - :class:`iris.fileformats.rules.FunctionRule`, - :class:`iris.fileformats.rules.ProcedureRule`, - :class:`iris.fileformats.rules.RulesContainer` and - :func:`iris.fileformats.rules.calculate_forecast_period`. - -* Deprecated the custom pp save rules mechanism implemented by the functions :func:`iris.fileformats.pp.add_save_rules` and :func:`iris.fileformats.pp.reset_save_rules`. The functions :func:`iris.fileformats.pp.as_fields`, :func:`iris.fileformats.pp.as_pairs` and :func:`iris.fileformats.pp.save_fields` provide alternative means of achieving the same ends. - -Documentation Changes -===================== -* It is now clear that repeated values will form a group under :meth:`iris.cube.Cube.aggregated_by` even if they aren't consecutive. Hence, the documentation for :mod:`iris.cube` has been changed to reflect this. -* The documentation for :meth:`iris.analysis.calculus.curl` has been updated for clarity. -* False claims about :meth:`iris.fileformats.pp.save`, :meth:`iris.fileformats.pp.as_pairs`, and :meth:`iris.fileformats.pp.as_fields` being able to take instances of :class:`iris.cube.CubeList` as inputs have been removed. -* A :doc:`new code example <../examples/Meteorology/wind_speed>`, demonstrating the use of a quiver plot to display wind speeds over Lake Victoria, has been added. -* The docstring for :data:`iris.analysis.SUM` has been updated to explicitly state that weights passed to it aren't normalised internally. -* A note regarding the impossibility of partially collapsing multi-dimensional coordinates has been added to the user guide. - diff --git a/docs/iris/src/whatsnew/1.11.rst b/docs/iris/src/whatsnew/1.11.rst deleted file mode 100644 index eb93ec2f8c..0000000000 --- a/docs/iris/src/whatsnew/1.11.rst +++ /dev/null @@ -1,31 +0,0 @@ -What's New in Iris 1.11 -*********************** - -:Release: 1.11 -:Date: 2016-11-28 - -This document explains the new/changed features of Iris in version 1.11 -(:doc:`View all changes `.) - -Iris 1.11 Features -================== -* If available, display the ``STASH`` code instead of ``unknown / (unknown)`` when printing cubes - with no ``standard_name`` and no ``units``. -* Support for saving to netCDF with data packing has been added. -* The coordinate system :class:`iris.coord_systems.LambertAzimuthalEqualArea` has been added with NetCDF saving support. - -Bugs Fixed -========== -* Fixed a floating point tolerance bug in :func:`iris.experimental.regrid.regrid_area_weighted_rectilinear_src_and_grid` - for wrapped longitudes. -* Allow :func:`iris.util.new_axis` to promote the nominated scalar coordinate of a cube - with a scalar masked constant data payload. -* Fixed a bug where :func:`iris.util._is_circular` would erroneously return false - when coordinate values are decreasing. -* When saving to NetCDF, the existing behaviour of writing string attributes as ASCII has been - maintained across known versions of netCDF4-python. - -Documentation Changes -===================== -* Fuller doc-string detail added to :func:`iris.analysis.cartography.unrotate_pole` and - :func:`iris.analysis.cartography.rotate_pole`. 
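-
-  As a brief usage sketch (the pole position and point values here are purely
-  illustrative)::
-
-      import numpy as np
-      from iris.analysis.cartography import rotate_pole
-
-      lons = np.array([350.0, 352.0])
-      lats = np.array([53.0, 54.0])
-      # Transform true longitudes/latitudes into the rotated-pole system.
-      rot_lons, rot_lats = rotate_pole(lons, lats, pole_lon=177.5, pole_lat=37.5)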
diff --git a/docs/iris/src/whatsnew/1.13.rst b/docs/iris/src/whatsnew/1.13.rst deleted file mode 100644 index 532c160f13..0000000000 --- a/docs/iris/src/whatsnew/1.13.rst +++ /dev/null @@ -1,37 +0,0 @@ -What's New in Iris 1.13 -*********************** - -:Release: 1.13 -:Date: 2017-05-17 - - -This document explains the new/changed features of Iris in version 1.13 -(:doc:`View all changes `.) - -Iris 1.13 Features -================== - -* Allow the reading of NAME trajectories stored by time instead of by particle number. -* An experimental link to python-stratify via :mod:`iris.experimental.stratify`. -* Data arrays may be shared between cubes, and subsets of cubes, by using the :meth:`iris.cube.share_data` flag. - - -Bug Fixes -========= - -* The bounds are now set correctly on the longitude coordinate if a zonal mean diagnostic has been loaded from a PP file as per the CF Standard. -* NetCDF loading will now determine whether there is a string-valued scalar label, i.e. a character variable that only has one dimension (the length of the string), and interpret this correctly. -* A line plot of geographic coordinates (e.g. drawing a trajectory) wraps around the edge of the map cleanly, rather than plotting a segment straight across the map. -* When saving to PP, lazy data is preserved when generating PP fields from cubes so that a list of cubes can be saved to PP without excessive memory requirements. -* An error is now correctly raised if a user tries to perform an arithmetic operation on two cubes with mismatching coordinates. Previously these cases were caught by the add and subtract operators, and now it is also caught by the multiply and divide operators. -* Limited area Rotated Pole datasets where the data range is ``0 <= lambda < 360``, for example as produced in New Zealand, are plotted over a sensible map extent by default. -* Removed the potential for a RuntimeWarning: overflow encountered in ``int_scalars`` which was missed during collapsed calculations. This could trip up unwary users of limited data types, such as int32 for very large numbers (e.g. seconds since 1970). -* The CF conventions state that certain ``formula_terms`` terms may be omitted and assumed to be zero (http://cfconventions.org/cf-conventions/v1.6.0/cf-conventions.html#dimensionless-v-coord) so Iris now allows factories to be constructed with missing terms. -* In the User Guide's contour plot example, clabel inline is set to be False so that it renders correctly, avoiding spurious horizontal lines across plots, although this does make labels a little harder to see. -* The computation of area weights has been changed to a more numerically stable form. The previous form converted latitude to colatitude and used difference of cosines in the cell area computation. This formulation uses latitude and difference of sines. The conversion from latitude to colatitude at lower precision causes errors when computing the cell areas. - -Testing -======= - -* Iris has adopted conda-forge to provide environments for continuous integration testing. - diff --git a/docs/iris/src/whatsnew/1.4.rst b/docs/iris/src/whatsnew/1.4.rst deleted file mode 100644 index 053a6e1096..0000000000 --- a/docs/iris/src/whatsnew/1.4.rst +++ /dev/null @@ -1,205 +0,0 @@ -What's new in Iris 1.4 -********************** - -:Release: 1.4.0 -:Date: 14 June 2013 - -This document explains the new/changed features of Iris in version 1.4. -(:doc:`View all changes `.) 
- -Iris 1.4 features ================= - -A summary of the main features added with version 1.4: - -* Multiple cubes can now be exported to a NetCDF file. -* Correct nearest-neighbour calculation with circular coords. -* :ref:`Experimental regridding enhancements`. -* :ref:`Iris-Pandas interoperability`. -* NIMROD level type 12 (levels below ground) can now be loaded. -* :ref:`Load cubes from the internet via OPeNDAP`. -* :ref:`GeoTiff export (experimental)`. -* :ref:`Cube merge update`. -* :ref:`Unambiguous season year naming`. -* NIMROD files with multiple fields and period of interest can now be loaded. -* Missing values are now handled when loading GRIB messages. -* PP export rule to calculate forecast period. -* :func:`~iris.cube.Cube.aggregated_by` now maintains array masking. -* IEEE 32bit fieldsfiles can now be loaded. -* NetCDF transverse mercator and climatology data can now be loaded. -* Polar stereographic GRIB data can now be loaded. -* :ref:`Cubes with no vertical coord can now be exported to GRIB`. -* :ref:`Simplified resource configuration`. -* :ref:`Extended GRIB parameter translation`. -* Added an optimisation for single-valued coordinate constraints. -* :ref:`One dimensional linear interpolation fix`. -* :ref:`Fix for iris.analysis.calculus.differentiate`. -* Fixed pickling of cubes with 2D aux coords from NetCDF. -* Fixed bug which ignored the "coords" keyword for certain plots. -* Use the latest release of Cartopy, v0.8.0. - - -Incompatible changes -------------------- -* As part of simplifying the mechanism for accessing test data, - :func:`iris.io.select_data_path`, :data:`iris.config.DATA_REPOSITORY`, - :data:`iris.config.MASTER_DATA_REPOSITORY` and - :data:`iris.config.RESOURCE_DIR` have been removed. - -Deprecations ------------ -* The *add_custom_season_** functions from :mod:`~iris.coord_categorisation` have been deprecated in favour of adding their functionality to the *add_season_** functions. - - -.. _OPeNDAP: http://www.opendap.org/about - - -.. _exp-regrid: - -Experimental regridding enhancements -==================================== - -Bilinear, area-weighted and area-conservative regridding functions are now available in -:mod:`iris.experimental`. These functions support masked data and handle -derived coordinates such as hybrid height. The final API is still in development. -In the meantime: - -Bilinear rectilinear regridding ------------------------------- -:func:`~iris.experimental.regrid.regrid_bilinear_rectilinear_src_and_grid` -can be used to regrid a cube onto a horizontal grid defined in a different coordinate system. -The data values are calculated using bilinear interpolation. - -For example:: - - from iris.experimental.regrid import regrid_bilinear_rectilinear_src_and_grid - regridded_cube = regrid_bilinear_rectilinear_src_and_grid(source_cube, target_grid_cube) - -Area-weighted regridding ------------------------ -:func:`~iris.experimental.regrid.regrid_area_weighted_rectilinear_src_and_grid` can be used to regrid a cube -such that the data values of the resulting cube are calculated using the -area-weighted mean. - -For example:: - - from iris.experimental.regrid import regrid_area_weighted_rectilinear_src_and_grid as regrid_area_weighted - regridded_cube = regrid_area_weighted(source_cube, target_grid_cube) - -Area-conservative regridding ---------------------------- -:func:`~iris.experimental.regrid_conservative.regrid_conservative_via_esmpy` -can be used for area-conservative regridding between geographical coordinate systems.
-This uses the ESMF library functions, via the ESMPy interface. - -For example:: - - from iris.experimental.regrid_conservative import regrid_conservative_via_esmpy - regridded_cube = regrid_conservative_via_esmpy(source_cube, target_grid_cube) - - -.. _iris-pandas: - -Iris-Pandas interoperability -============================ -Conversion to and from Pandas Series_ and DataFrames_ is now available. -See :mod:`iris.pandas` for more details. - -.. _Series: http://pandas.pydata.org/pandas-docs/stable/api.html#series -.. _DataFrames: http://pandas.pydata.org/pandas-docs/stable/api.html#dataframe - - -.. _load-opendap: - -Load cubes from the internet via OPeNDAP -======================================== -Cubes can now be loaded directly from the internet, via OPeNDAP_. - -For example:: - - cubes = iris.load("http://geoport.whoi.edu/thredds/dodsC/bathy/gom15") - - -.. _geotiff_export: - -GeoTiff export -============== -With this experimental feature, two dimensional cubes can now be exported to GeoTiff files. - -For example:: - - from iris.experimental.raster import export_geotiff - export_geotiff(cube, filename) - -.. note:: - - This is a raw data export only and does not save Iris plots. - - -.. _cube-merge-update: - -Cube merge update -================= -Cube merging now favours numerical coordinates over string coordinates -to describe a dimension, and :class:`~iris.coords.DimCoord` over -:class:`~iris.coords.AuxCoord`. These modifications prevent the error: -*"No functional relationship between separable and inseparable candidate dimensions"*. - - -.. _season-year-name: - -Unambiguous season year naming -============================== -The default names of categorisation coordinates are now less ambiguous. -For example, :func:`~iris.coord_categorisation.add_month_number` and -:func:`~iris.coord_categorisation.add_month_fullname` now create -"month_number" and "month_fullname" coordinates. - - -.. _grib-novert: - -Cubes with no vertical coord can now be exported to GRIB -======================================================== -Iris can now export cubes with no vertical coord to GRIB. -The solution is still under discussion: see https://github.com/SciTools/iris/issues/519. - - -.. _simple_cfg: - -Simplified resource configuration -================================= -A new configuration variable called :data:`iris.config.TEST_DATA_DIR` -has been added, replacing the previous combination of -:data:`iris.config.MASTER_DATA_REPOSITORY` and -:data:`iris.config.DATA_REPOSITORY`. This constant should be the path -to a directory containing the test data required by the unit tests. It can -be set by adding a ``test_data_dir`` entry to the ``Resources`` section of -``site.cfg``. See :mod:`iris.config` for more details. - - -.. _grib_params: - -Extended GRIB parameter translation -=================================== - - More GRIB2 params are recognised on input. - - Now translates some codes on GRIB2 output. - - Some GRIB2 params may load with a different standard_name. - - - -.. _one-d-linear: - -One dimensional linear interpolation fix -======================================== -:func:`~iris.analysis.interpolate.linear` can now extrapolate from a single point -assuming a gradient of zero. This prevents an issue when loading cross sections -with a hybrid height coordinate, on a staggered grid and only a single orography field. - - -..
_calc-diff-fix: - -Fix for iris.analysis.calculus.differentiate ============================================= -A bug in :func:`~iris.analysis.calculus.differentiate` that had the potential to cause -the loss of coordinate metadata when calculating the curl or the derivative of a cube has been fixed. - diff --git a/docs/iris/src/whatsnew/1.7.rst b/docs/iris/src/whatsnew/1.7.rst deleted file mode 100644 index 2f3a52fbb9..0000000000 --- a/docs/iris/src/whatsnew/1.7.rst +++ /dev/null @@ -1,248 +0,0 @@ -What's new in Iris 1.7 ********************** - -This document explains the new/changed features of Iris in version 1.7. -(:doc:`View all changes `.) - -:Release: 1.7.4 -:Date: 15th April 2015 - -Iris 1.7 features ================= - -.. _showcase: - -.. admonition:: Showcase: Iris is making use of Biggus - - Iris is now making extensive use of `Biggus `_ - for virtual arrays and lazy array evaluation. In practice this means that analyses - of cubes with data bigger than the available system memory are now possible. - - Other than the improved functionality the changes are mostly - transparent; for example, before the introduction of biggus, MemoryErrors - were likely for very large datasets:: - - >>> result = extremely_large_cube.collapsed('time', iris.analysis.MEAN) - MemoryError - - Now, for supported operations, the evaluation is lazy (i.e. it doesn't take - place until the actual data is subsequently requested) and can handle data - larger than available system memory:: - - >>> result = extremely_large_cube.collapsed('time', iris.analysis.MEAN) - >>> print(type(result)) - <class 'iris.cube.Cube'> - - Memory is still a limiting factor if ever the data is desired as a NumPy array - (e.g. via :data:`cube.data `), but additional methods have - been added to the Cube to support querying and subsequently accessing the "lazy" - data form (see :meth:`~iris.cube.Cube.has_lazy_data` and - :meth:`~iris.cube.Cube.lazy_data`). - -.. admonition:: Showcase: New interpolation and regridding API - - New interpolation and regridding interfaces have been added which simplify and - extend the existing functionality. - - The interfaces are exposed on the cube in the form of the - :meth:`~iris.cube.Cube.interpolate` and :meth:`~iris.cube.Cube.regrid` methods. - Conceptually the signatures of the methods are:: - - interpolated_cube = cube.interpolate(interpolation_points, interpolation_scheme) - - and:: - - regridded_cube = cube.regrid(target_grid_cube, regridding_scheme) - - Whilst not all schemes have been migrated to the new interface, - :class:`iris.analysis.Linear` defines both linear interpolation and regridding, - and :class:`iris.analysis.AreaWeighted` defines an area weighted regridding - scheme. - -.. admonition:: Showcase: Merge and concatenate reporting - - Merge reporting is designed as an aid to the merge processes. Should merging - a :class:`~iris.cube.CubeList` fail, merge reporting means that a descriptive - error will be raised that details the differences between the cubes in the - :class:`~iris.cube.CubeList` that prevented the merge from being successful. - - A new :class:`~iris.cube.CubeList` method, called - :meth:`~iris.cube.CubeList.merge_cube`, has been introduced. Calling it on a - :class:`~iris.cube.CubeList` will result in a single merged - :class:`~iris.cube.Cube` being returned or an error message being raised - that describes why the merge process failed.
- - The following example demonstrates the error message that describes a merge - failure caused by cubes having differing attributes:: - - >>> cube_list = iris.cube.CubeList((c1, c2)) - >>> cube_list.merge_cube() - Traceback (most recent call last): - ... - raise iris.exceptions.MergeError(msgs) - iris.exceptions.MergeError: failed to merge into a single cube. - cube.attributes keys differ: 'foo' - - The naming of this new method mirrors that of Iris load functions, where - one would always expect a :class:`~iris.cube.CubeList` from :func:`iris.load` - and a :class:`~iris.cube.Cube` from :func:`iris.load_cube`. - - Concatenate reporting is the equivalent process for concatenating a - :class:`~iris.cube.CubeList`. It is accessed through the method - :meth:`~iris.cube.CubeList.concatenate_cube`, which will return a single - concatenated cube or produce an error message that describes why the - concatenate process failed. - -.. admonition:: Showcase: Cube broadcasting - - When performing cube arithmetic, cubes now follow similar broadcasting rules - as NumPy arrays. - - However, the additional richness of Iris coordinate meta-data provides an - enhanced capability beyond the basic broadcasting behaviour of NumPy. - - This means that when performing cube arithmetic, the dimensionality and shape of - cubes no longer need to match. For example, if the dimensionality of a cube is - reduced by collapsing, then the result can be used to subtract from the original - cube to calculate an anomaly:: - - >>> time_mean = original_cube.collapsed('time', iris.analysis.MEAN) - >>> mean_anomaly = original_cube - time_mean - - Given both broadcasting **and** coordinate meta-data, Iris can now perform - arithmetic with cubes that have similar but not identical shape:: - - >>> similar_cube = original_cube.copy() - >>> similar_cube.transpose() - >>> zero_cube = original_cube - similar_cube - -* Merge reporting that raises a descriptive error if the merge process fails. -* Linear interpolation and regridding now make use of SciPy's RegularGridInterpolator - for much faster linear interpolation. -* NAME file loading now handles the "no time averaging" column and translates - height/altitude above ground/sea-level columns into appropriate coordinate metadata. -* The NetCDF saver has been extended to allow saving of cubes with hybrid pressure - auxiliary factories. -* PP/FF loading supports LBLEV of 9999. -* Extended GRIB1 loading to support data on hybrid pressure levels. -* :func:`iris.coord_categorisation.add_day_of_year` can be used to add categorised - day of year coordinates based on time coordinates with non-Gregorian calendars. -* Support for loading data on reduced grids from GRIB files in raw form without - automatically interpolating to a regular grid. -* The coordinate systems :class:`iris.coord_systems.Orthographic` and - :class:`iris.coord_systems.VerticalPerspective` (for imagery from geostationary - satellites) have been added. -* Extended NetCDF loading to support the "ocean sigma over z" auxiliary coordinate - factory. -* Support added for loading CF-NetCDF data with bounds arrays that are missing a - vertex dimension. -* :meth:`iris.cube.Cube.rolling_window` can now be used with string-based - :class:`iris.coords.AuxCoord` instances. -* Loading of PP and FF files has been optimised through deferring creation of - PPField attributes. -* Automatic association of a coordinate's CF formula terms variable with the - data variable associated with that coordinate. 
-* PP loading translates cross-section height into a dimensional auxiliary coordinate. -* String auxiliary coordinates can now be plotted with the Iris plotting wrappers. -* :func:`iris.analysis.geometry.geometry_area_weights` now allows for the calculation of - normalized cell weights. -* Many new translations between the CF spec and STASH codes or GRIB2 parameter codes. -* PP save rules add the data's UM Version to the attributes of the saved file - when appropriate. -* NetCDF reference surface variable promotion available through the - :class:`iris.FUTURE` mechanism. -* A speed improvement in calculation of :func:`iris.analysis.geometry.geometry_area_weights`. -* The mdtol keyword was added to area-weighted regridding to allow control of the - tolerance for missing data. For a further description of this concept, see - :class:`iris.analysis.AreaWeighted`. -* Handling for patching of the CF conventions global attribute via a defined - cf_patch_conventions function. -* Deferred GRIB data loading has been introduced for reduced memory consumption when - loading GRIB files. -* Concatenate reporting that raises a descriptive error if the concatenation - process fails. -* A speed improvement when loading PP or FF data and constraining on STASH code. - -Bugs fixed -========== -* Data containing more than one reference cube for constructing hybrid height - coordinates can now be loaded. -* Removed cause of increased margin of error when interpolating. -* Changed floating-point precision used when wrapping points for interpolation. -* Mappables that can be used to generate colorbars are now returned by Iris - plotting wrappers. -* NetCDF load ignores over-specified formula terms on bounded dimensionless vertical - coordinates. -* Auxiliary coordinate factory loading now correctly interprets formula term - variables for "atmosphere hybrid sigma pressure" coordinate data. -* Corrected comparison of NumPy NaN values in cube merge process. -* Fixes for :meth:`iris.cube.Cube.intersection` to correct calculating the intersection - of a cube with split bounds, handling of circular coordinates, handling of - monotonically descending bounded coordinates and for finding a wrapped two-point - result and longitude tolerances. -* A bug affecting :meth:`iris.cube.Cube.extract` and :meth:`iris.cube.CubeList.extract` - that led to unexpected behaviour when operating on scalar cubes has been fixed. -* :meth:`~iris.cube.Cube.aggregated_by` may now be passed single-value coordinates. -* Making a copy of a :class:`iris.coords.DimCoord` no longer results in the writeable - flag on the copied points and bounds arrays being set to True. -* Can now save to PP a cube that has vertical levels but no orography. -* Fix a bug causing surface altitude and surface pressure fields to not appear - in cubes loaded with a STASH constraint. -* Fixed support for :class:`iris.fileformats.pp.STASH` objects in STASH constraints. -* A fix to avoid a problem where cube attribute names clash with NetCDF reserved attribute names. -* A fix to allow :meth:`iris.cube.CubeList.concatenate` to deal with descending coordinate order. -* Add missing NetCDF attribute `varname` when constructing a new :class:`iris.coords.AuxCoord`. -* The datatype of time arrays converted with :func:`iris.util.unify_time_units` is now preserved. - -Bugs fixed in v1.7.3 -^^^^^^^^^^^^^^^^^^^^ -* Scalar dimension coordinates can now be concatenated with :meth:`iris.cube.CubeList.concatenate`. -* Arbitrary names can no longer be set for elements of a :class:`iris.fileformats.pp.SplittableInt`.
-* Cubes that contain a pseudo-level coordinate can now be saved to PP. -* Fixed a bug in the FieldsFile loader that prevented it always loading all available fields. - -Bugs fixed in v1.7.4 -^^^^^^^^^^^^^^^^^^^^ -* :meth:`Coord.guess_bounds` can now deal with circular coordinates. -* :meth:`Coord.nearest_neighbour_index` can now work with descending bounds. -* Passing `weights` to :meth:`Cube.rolling_window` no longer prevents other - keyword arguments from being passed to the aggregator. -* Several minor fixes to allow use of Iris on Windows. -* Made use of the new standard_parallels keyword in Cartopy's LambertConformal - projection (Cartopy v0.12). Older versions of Iris will not be able to - create LambertConformal coordinate systems with Cartopy >= 0.12. - -Incompatible changes -==================== -* Saving a cube with a STASH attribute to NetCDF now produces a variable - with an attribute of "um_stash_source" rather than "ukmo__um_stash_source". -* Cubes saved to NetCDF with a coordinate system referencing a spherical ellipsoid - now result in the grid mapping variable containing only the "earth_radius" attribute, - rather than the "semi_major_axis" and "semi_minor_axis". -* Collapsing a cube over all of its dimensions now results in a scalar cube rather - than a 1d cube. - -Deprecations -============ -* :func:`iris.util.ensure_array` has been deprecated. -* Deprecated the :func:`iris.fileformats.pp.reset_load_rules` and - :func:`iris.fileformats.grib.reset_load_rules` functions. -* Matplotlib is no longer a core Iris dependency. - -Documentation Changes -===================== -* New sections on :ref:`cube broadcasting ` and - :doc:`regridding and interpolation ` - have been added to the :doc:`user guide `. -* An example demonstrating custom log-scale colouring has been added. - See :ref:`General-anomaly_log_colouring`. -* An example demonstrating the creation of a custom - :class:`iris.analysis.Aggregator` has been added. - See :ref:`General-custom_aggregation`. -* An example of reprojecting data from 2D auxiliary spatial coordinates - (such as that from the ORCA grid) has been added. See :ref:`General-orca_projection`. -* A clarification of the behaviour of :func:`iris.analysis.calculus.differentiate`. -* A new :doc:`"whitepapers" ` section has been added to the documentation along - with the addition of a paper providing an :doc:`overview of the load process for UM-like - fileformats (e.g. PP and Fieldsfile) `. - diff --git a/docs/iris/src/whatsnew/1.8.rst b/docs/iris/src/whatsnew/1.8.rst deleted file mode 100644 index c763411ed8..0000000000 --- a/docs/iris/src/whatsnew/1.8.rst +++ /dev/null @@ -1,181 +0,0 @@ -What's new in Iris 1.8 -********************** - -:Release: 1.8.1 -:Date: 3rd June 2015 - -This document explains the new/changed features of Iris in version 1.8. -(:doc:`View all changes `.) - -Iris 1.8 features -================= - -.. _showcase: - -.. admonition:: Showcase: Rotate winds - - Iris can now rotate and unrotate wind vector data by transforming the wind - vector data to another coordinate system. - - For example:: - - >>> from iris.analysis.cartography import rotate_winds - >>> u_cube = iris.load_cube('my_rotated_u_wind_cube.pp') - >>> v_cube = iris.load_cube('my_rotated_v_wind_cube.pp') - >>> target_cs = iris.coord_systems.GeogCS(6371229.0) - >>> u_prime, v_prime = rotate_winds(u_cube, v_cube, target_cs) - -.. admonition:: Showcase: Nearest-neighbour scheme - - A nearest-neighbour scheme for interpolation and regridding has been added - to Iris. 
This joins the existing :class:`~iris.analysis.Linear` and - :class:`~iris.analysis.AreaWeighted` interpolation and regridding schemes. - - For example:: - - >>> result = cube.interpolate(sample_points, iris.analysis.Nearest()) - >>> regridded_cube = cube.regrid(target_grid, iris.analysis.Nearest()) - -.. admonition:: Showcase: Slices over a coordinate - - You can slice over one or more dimensions of a cube using :meth:`iris.cube.Cube.slices_over`. - This provides similar functionality to :meth:`~iris.cube.Cube.slices` but with - almost the opposite outcome. - - Using :meth:`~iris.cube.Cube.slices` to slice a cube on a selected dimension returns - all possible slices of the cube with the selected dimension retaining its dimensionality. - Using :meth:`~iris.cube.Cube.slices_over` to slice a cube on a selected - dimension returns all possible slices of the cube over the selected dimension. - - To demonstrate this:: - - >>> cube = iris.load(iris.sample_data_path('colpex.pp'))[0] - >>> print(cube.summary(shorten=True)) - air_potential_temperature / (K) (time: 6; model_level_number: 10; grid_latitude: 83; grid_longitude: 83) - >>> my_slice = next(cube.slices('time')) - >>> my_slice_over = next(cube.slices_over('time')) - >>> print(my_slice.summary(shorten=True)) - air_potential_temperature / (K) (time: 6) - >>> print(my_slice_over.summary(shorten=True)) - air_potential_temperature / (K) (model_level_number: 10; grid_latitude: 83; grid_longitude: 83) - - -* :func:`iris.cube.CubeList.concatenate` now works with `biggus `_ arrays and so - now supports concatenation of cubes with deferred data. -* Improvements to NetCDF saving through using biggus: - - * A cube's lazy data payload will still be lazy after saving; the data will not - be loaded into memory by the save operation. - * Cubes with data payloads larger than system memory can now be saved to NetCDF - through biggus streaming the data to disk. - -* :func:`iris.util.demote_dim_coord_to_aux_coord` and :func:`iris.util.promote_aux_coord_to_dim_coord` - allow a coordinate to be easily demoted or promoted within a cube. -* :func:`iris.util.squeeze` removes all length 1 dimensions from a cube, and demotes - any associated squeeze dimension :class:`~iris.coords.DimCoord` to be a scalar coordinate. -* :meth:`iris.cube.Cube.slices_over`, which returns an iterator of all sub-cubes along a given - coordinate or dimension index. -* :meth:`iris.cube.Cube.interpolate` now accepts datetime.datetime and - netcdftime.datetime instances for date or time coordinates. -* Many new and updated translations between CF spec and STASH codes or GRIB2 parameter - codes. -* PP/FF loader creates a height coordinate at 1.5m or 10m for certain relevant stash codes. -* Lazy aggregator support for the :class:`standard deviation ` - aggregator has been added. -* A speed improvement in calculation of :func:`iris.analysis.cartography.area_weights`. -* Experimental support for unstructured grids has been added with :func:`iris.experimental.ugrid`. - This has been implemented using `UGRID `_. -* :meth:`iris.cube.CubeList.extract_overlapping` supports extraction of cubes over - regions where common coordinates overlap, over multiple coordinates. -* Warnings raised due to invalid units in loaded data have been suppressed. -* Experimental low-level read and write access for FieldsFile variants is now supported - via :class:`iris.experimental.um.FieldsFileVariant`. 
-* The PP loader now returns cubes for all fields preceding a field with a problematic
-  header, before raising an exception.
-* NetCDF loader skips invalid global attributes, issuing a warning rather than raising an
-  exception.
-* A warning is now raised rather than an exception when constructing an
-  :class:`~iris.aux_factory.AuxCoordFactory` fails.
-* Supported :class:`aux coordinate factories `
-  have been extended to include:
-
-  * ``ocean sigma coordinate``,
-  * ``ocean s coordinate``,
-  * ``ocean s coordinate, generic form 1``, and
-  * ``ocean s coordinate, generic form 2``.
-
-* :meth:`iris.cube.Cube.intersection` now supports taking a points-only intersection.
-  Any bounds on intersected coordinates are ignored but retained.
-* The FF loader's known handled grids now include ``Grid 21``.
-* A :class:`nearest neighbour ` scheme is now provided for
-  :meth:`iris.cube.Cube.interpolate` and :meth:`iris.cube.Cube.regrid`.
-* :func:`iris.analysis.cartography.rotate_winds` supports transformation of wind vectors
-  to a different coordinate system.
-* NumPy universal functions can now be applied to cubes using
-  :func:`iris.analysis.maths.apply_ufunc`.
-* Generic functions can be applied to :class:`~iris.cube.Cube` instances using
-  :class:`iris.analysis.maths.IFunc`.
-* The :class:`iris.analysis.Linear` scheme now supports regridding as well as interpolation.
-  This enables :meth:`iris.cube.Cube.regrid` to perform bilinear regridding, which now
-  replaces the experimental routine "iris.experimental.regrid.regrid_bilinear_rectilinear_src_and_grid".
-
-Bugs fixed
-==========
-
-1.8.0
------
-* Fix in netCDF loader to correctly determine whether the longitude coordinate
-  (including scalar coordinates) is circular.
-* :meth:`iris.cube.Cube.intersection` now supports bounds that extend slightly beyond 360
-  degrees.
-* Lateral Boundary Condition (LBC) type FieldsFiles are now handled correctly by the FF loader.
-* Making a copy of a scalar cube with no data now correctly copies the data array.
-* Height coordinates in NAME trajectory output files have been changed to match other
-  NAME output file formats.
-* Fixed datatype when loading an ``integer_constants`` array from a FieldsFile.
-* FF/PP loader adds appropriate cell methods for ``lbtim.ib = 3`` intervals.
-* An exception is raised if the units of the latitude and longitude coordinates
-  of the cube passed into :func:`iris.analysis.cartography.area_weights` are not
-  convertible to radians.
-* GRIB1 loader now creates a time coordinate for a time range indicator of 2.
-* NetCDF loader now loads units that are empty strings as dimensionless.
-
-1.8.1
------
-* The PP loader now carefully handles floating point errors in date time conversions to hours.
-* The handling of fill values for lazy data loaded from NetCDF files has been altered, such that the
-  _FillValue set in the file is preserved through lazy operations.
-* The risk that cube intersections could return incorrect results due to floating point
-  tolerances is reduced.
-* The new GRIB2 loading code is altered to enable the loading of various data representation
-  templates; the data value unpacking is handled by the GRIB API.
-* Saving cube collections to NetCDF, where multiple similar aux-factories exist within the cubes,
-  is now carefully handled such that extra file variables are created where required in some cases.
-
-1.8.2
------
-* A fix to prevent the error: *AttributeError: 'module' object has no attribute 'date2num'*.
-  This was caused by the function :func:`netcdftime.date2num` being removed from the netCDF4
-  package in recent versions.
-
-Deprecations
-============
-* The original GRIB loader has been deprecated and replaced with a new
-  template-based GRIB loader.
-* Deprecated default NetCDF save behaviour of assigning the outermost
-  dimension to be unlimited. Switch to the new behaviour with no auto
-  assignment by setting :data:`iris.FUTURE.netcdf_no_unlimited` to True.
-* The former experimental method
-  "iris.experimental.regrid.regrid_bilinear_rectilinear_src_and_grid" has been removed, as
-  :class:`iris.analysis.Linear` now includes this functionality.
-
-Documentation Changes
-=====================
-* A chapter on :doc:`merge and concatenate ` has been
-  added to the :doc:`user guide `.
-* A section on installing Iris using `conda `_ has been
-  added to the :doc:`install guide `.
-* Updates to the chapter on
-  :doc:`regridding and interpolation `
-  have been added to the :doc:`user guide `.
-
diff --git a/docs/iris/src/whatsnew/1.9.rst b/docs/iris/src/whatsnew/1.9.rst
deleted file mode 100644
index 7a4848b434..0000000000
--- a/docs/iris/src/whatsnew/1.9.rst
+++ /dev/null
@@ -1,129 +0,0 @@
-What's New in Iris 1.9
-**********************
-
-:Release: 1.9.2
-:Date: 28th January 2016
-
-This document explains the new/changed features of Iris in version 1.9.
-(:doc:`View all changes <index>`.)
-
-Iris 1.9 Features
-=================
-* Support for running on Python 3.4 has been added to the whole code base. Some features which
-  depend on external libraries will not be available until they also support Python 3, namely:
-
-  * gribapi does not yet provide a Python 3 interface
-
-* Added the UM pseudo level type to the information made available in the STASH_TRANS table in :mod:`iris.fileformats.um._ff_cross_references`
-* When reading "cell_methods" attributes from NetCDF files, allow optional whitespace before the colon.
-  This is not strictly in the CF spec, but is a common occurrence.
-* Basic cube arithmetic (plus, minus, times, divide) now supports lazy evaluation.
-* :func:`iris.analysis.cartography.rotate_winds` can now operate much faster on multi-layer (i.e. > 2-dimensional) cubes,
-  as it calculates rotation coefficients only once and reuses them for additional layers.
-
-* Linear regridding of a multi-layer (i.e. > 2-dimensional) cube is now much faster,
-  as it calculates transform coefficients just once and reuses them for additional layers.
-* Ensemble statistics can now be saved to GRIB2, using Product Definition Template 4.11.
-
-* Loading of NetCDF data with ocean vertical coordinates now returns a 'depth' in addition to an 'eta' cube.
-  This operates on specific defined dimensionless coordinates : see CF spec version 1.6, Appendix D.
-
-* :func:`iris.analysis.stats.pearsonr` updates:
-
-  * Cubes can now be different shapes, provided one is broadcastable to the
-    other.
-  * Accepts weights keyword for weighted correlations.
-  * Accepts mdtol keyword for missing data tolerance level.
-  * Accepts common_mask keyword for restricting calculation to unmasked pairs of
-    cells.
-
-* Added a new point-in-cell regridding scheme, :class:`iris.experimental.regrid.PointInCell`.
-* Added :meth:`iris.analysis.WPERCENTILE` - a new weighted aggregator for calculating
-  percentiles.
-* Added cell-method translations for LBPROC=64 and 192 in UM files, encoding 'zonal mean' and 'zonal+time mean'.
-
-* Support for loading GRIB2 messages defined on a Lambert conformal grid has been added to
-  the GRIB2 loader.
-* Data on potential-temperature (theta) levels can now be saved to GRIB2, with a fixed surface type of 107.
-* Added several new helper functions for file-save customisation
-  (see also : :doc:`Saving Iris Cubes `):
-
-  * :meth:`iris.fileformats.grib.as_pairs`
-  * :meth:`iris.fileformats.grib.as_messages`
-  * :meth:`iris.fileformats.grib.save_messages`
-  * :meth:`iris.fileformats.pp.as_pairs`
-  * :meth:`iris.fileformats.pp.as_fields`
-  * :meth:`iris.fileformats.pp.save_fields`
-* Loading data from GRIB2 now supports most of the currently defined 'data representation templates' :
-  code numbers 0, 1, 2, 3, 4, 40, 41, 50, 51 and 61.
-* When a Fieldsfile is opened for update as a :class:`iris.experimental.um.FieldsFileVariant`,
-  unmodified packed data in the file can now be retained in the original form.
-  Previously it could only be stored in an unpacked form.
-* When reading and writing NetCDF data, the CF 'flag' attributes,
-  "flag_masks", "flag_meanings" and "flag_values" are now preserved through Iris load and save.
-* `mo_pack `_ was added as an optional dependency.
-  It is used to encode and decode data in WGDOS packed form.
-* The :meth:`iris.experimental.um.Field.get_data` method can now be used to read Fieldsfile data
-  after the original :class:`iris.experimental.um.FieldsFileVariant` has been closed.
-
-Bugs Fixed
-==========
-* Fixed a bug in :meth:`iris.unit.Unit.convert`
-  (and the equivalent in `cf_units `_)
-  so that it now converts data to the native endianness, without which udunits could not read it correctly.
-* Fixed a bug with loading WGDOS packed data in :mod:`iris.experimental.um`,
-  which could occasionally crash, with some data.
-* Ignore non-numeric suffixes in the numpy version string, which would otherwise crash some regridding routines.
-* Fixed a bug in :mod:`iris.fileformats.um_cf_map` where the standard name
-  for the stash code m01s12i187 was incorrectly set, such that it was inconsistent
-  with the stated unit of measure, 'm s-1'. A different name, a long_name
-  of 'change_over_time_in_upward_air_velocity_due_to_advection' with
-  units of 'm s-1', is now used instead.
-* Fixed a bug in :meth:`iris.cube.Cube.intersection`.
-  When edge points were at (base + period), intersection would unnecessarily wrap the data.
-* Fixed a bug in :mod:`iris.fileformats.pp`.
-  A previous release removed the ability to pass a partial constraint on the STASH attribute.
-* :meth:`iris.plot.default_projection_extent` now correctly raises an exception if a cube has X bounds but no Y bounds, or vice versa.
-  Previously this check never failed, as the test was wrong.
-* When loading NetCDF data, a "units" attribute containing unicode characters is now transformed by backslash-replacement.
-  Previously this caused a crash. Note: unicode units are *not supported in the CF conventions*.
-* When saving to NetCDF, factory-derived auxiliary coordinates are now correctly saved with different names when they are not identical.
-  Previously, such coordinates could be saved with the same name, leading to errors.
-* Fixed a bug in :meth:`iris.experimental.um.FieldsFileVariant.close`,
-  which now correctly allocates extra blocks for larger lookups when saving.
-  Previously, when larger files open for update were closed, they could be written out with data overlapping the lookup table.
-* Fixed a bug in :class:`iris.aux_factory.OceanSigmaZFactory`
-  which sometimes caused crashes when fetching the points of an "ocean sigma z" coordinate.
-
-Version 1.9.1
--------------
-* Fixed a unicode bug preventing standard names from being built cleanly when installing in Python 3.
-
-Version 1.9.2
--------------
-* New warning regarding possible data loss when writing lazy data to a file which is also open for reading.
-* Removal of a warning about data payload loading from concatenate.
-* Updates to concatenate documentation.
-* Fixed a bug with a name change in the netcdf4-python package.
-* Fixed a bug building the documentation examples.
-* Fixed a bug by avoiding sorting classes directly when :meth:`iris.cube.Cube.coord_system` is used in Python 3.
-* Fixed a bug regarding an unsuccessful dot import.
-
-Incompatible Changes
-====================
-* GRIB message/file reading and writing may not be available for Python 3 due to GRIB API limitations.
-
-Deprecations
-============
-* Deprecated :mod:`iris.unit`, with unit functionality provided by `cf_units `_ instead.
-* When loading from NetCDF, a deprecation warning is emitted if there is vertical coordinate information
-  that *would* produce extra result cubes if :data:`iris.FUTURE.netcdf_promote` were set,
-  but it is *not* set.
-* Deprecated :class:`iris.aux_factory.LazyArray`.
-
-Documentation Changes
-=====================
-* A chapter on :doc:`saving iris cubes ` has been
-  added to the :doc:`user guide `.
-* Added script and documentation for building a what's new page from developer-submitted contributions.
-  See :doc:`Contributing a "What's New" entry `.
diff --git a/docs/iris/src/whatsnew/2.3.rst b/docs/iris/src/whatsnew/2.3.rst
deleted file mode 100644
index c5a6060146..0000000000
--- a/docs/iris/src/whatsnew/2.3.rst
+++ /dev/null
@@ -1,215 +0,0 @@
-What's New in Iris 2.3.0
-************************
-
-:Release: 2.3.0
-:Date: 2019-10-04
-
-This document explains the new/changed features of Iris in version 2.3.0.
-(:doc:`View all changes <index>`.)
-
-Iris 2.3.0 Features
-===================
-.. _showcase:
-
-.. admonition:: Increased Support for CF 1.7
-
-    We have introduced several changes that contribute to Iris's support for
-    the CF Conventions, including some CF 1.7 additions. We are now able to
-    support:
-
-    * :ref:`Climatological Coordinates <climatological>`
-    * :ref:`Standard name modifiers <standard_name>`
-    * :ref:`Geostationary projection <geostationary>`
-
-    You can read more about each of these below.
-
-    Additionally, the conventions attribute, added by Iris when saving to
-    NetCDF, has been updated to "CF-1.7", accordingly.
-
-.. _climatological:
-.. admonition:: Climatological Coordinate Support
-
-    Iris can now load, store and save `NetCDF climatological coordinates
-    `_. Any cube time
-    coordinate can be marked as a climatological time axis using the boolean
-    property: ``climatological``. The climatological bounds are stored in the
-    coordinate's ``bounds`` property.
-
-    When an Iris climatological coordinate is saved in NetCDF, the NetCDF
-    coordinate variable will be given a 'climatology' attribute, and the
-    contents of the
-    coordinate's ``bounds`` property are written to a NetCDF boundary variable
-    called '_bounds'. These are in place of a standard
-    'bounds' attribute and accompanying boundary variable. See below
-    for an `example adapted from CF conventions `_:
-
-    .. code-block:: none
-
-        dimensions:
-            time=4;
-            bnds=2;
-        variables:
-            float temperature(time,lat,lon);
-                temperature:long_name="surface air temperature";
-                temperature:cell_methods="time: minimum within years time: mean over years";
-                temperature:units="K";
-            double time(time);
-                time:climatology="time_climatology";
-                time:units="days since 1960-1-1";
-            double time_climatology(time,bnds);
-        data:  // time coordinates translated to date/time format
-            time="1960-4-16", "1960-7-16", "1960-10-16", "1961-1-16" ;
-            time_climatology="1960-3-1", "1990-6-1",
-                             "1960-6-1", "1990-9-1",
-                             "1960-9-1", "1990-12-1",
-                             "1960-12-1", "1991-3-1" ;
-
-    If a climatological time axis is detected when loading NetCDF -
-    indicated by the format described above - the ``climatological`` property
-    of the Iris coordinate will be set to ``True``.
-
-.. admonition:: New Chunking Strategy
-
-    Iris now makes better choices of Dask chunk sizes when loading from NetCDF
-    files: If a file variable has small, specified chunks, Iris will now choose
-    Dask chunks which are a multiple of these up to a default target size.
-
-    This is particularly relevant to files with an unlimited dimension, which
-    previously could produce a large number of small chunks. This had an adverse
-    effect on performance.
-
-    In addition, Iris now takes its default chunksize from the default configured
-    in Dask itself, i.e. ``dask.config.get('array.chunk-size')``.
-
-.. admonition:: Lazy Statistics
-
-    Several statistical operations can now be done lazily, taking advantage of the
-    performance improvements offered by Dask:
-
-    * :meth:`~iris.cube.Cube.aggregated_by`
-    * :class:`~iris.analysis.RMS` (more detail below)
-    * :class:`~iris.analysis.MEAN`
-
-----
-
-.. _geostationary:
-.. _standard_name:
-.. _conventions_1.7:
-
-* Cube data equality testing (and hence cube equality) now uses a more
-  relaxed
-  tolerance : This means that some cubes may now test 'equal' that previously
-  did not.
-  Previously, Iris compared cube data arrays using:
-  ``abs(a - b) < 1.e-8``
-
-  We now apply the default operation of :func:`numpy.allclose` instead,
-  which is equivalent to:
-  ``abs(a - b) < (1.e-8 + 1.e-5 * abs(b))``
-
-* Added support to render HTML for :class:`~iris.cube.CubeList` in Jupyter
-  Notebooks and JupyterLab.
-* Loading CellMeasures with integer values is now supported.
-* New coordinate system: :class:`iris.coord_systems.Geostationary`,
-  including load and save support, based on the `CF Geostationary projection
-  definition `_.
-* :class:`iris.coord_systems.VerticalPerspective` can now be saved to and
-  loaded from NetCDF files.
-* :class:`iris.experimental.regrid.PointInCell` moved to
-  :class:`iris.analysis.PointInCell` to make this regridding scheme public.
-* Iris now supports standard name modifiers. See `Appendix C, Standard Name Modifiers `_ for more information.
-* :meth:`iris.cube.Cube.remove_cell_measure` now also allows removal of a cell
-  measure by its name (previously it only accepted a CellMeasure object).
-* The :data:`iris.analysis.RMS` aggregator now supports a lazy calculation.
-  However, the "weights" keyword is not currently supported by this, so a
-  *weighted* calculation will still return a realised result, *and* force
-  realisation of the original cube data.
-* Iris now supports NetCDF Climate and Forecast (CF) Metadata Conventions 1.7 (see the `CF 1.7 Conventions Document `_ for more information).
-
-
-Iris 2.3.0 Dependency Updates
-=============================
-* Iris now supports Proj4 up to version 5, but not yet 6 or beyond, pending
-  `fixes to some cartopy tests `_.
-* Iris now requires Dask >= 1.2 to allow for improved coordinate equality
-  checks.
-
-
-Bugs Fixed
-==========
-* Cube equality of boolean data is now handled correctly.
-* Fixed a bug where cell measures were incorrect after a cube
-  :meth:`~iris.cube.Cube.transpose` operation. Previously, this resulted in
-  cell-measures that were no longer correctly mapped to the cube dimensions.
-* The :class:`~iris.coords.AuxCoord` disregarded masked points and bounds, as did the :class:`~iris.coords.DimCoord`.
-  The fix permits an :class:`~iris.coords.AuxCoord` to contain masked points/bounds, and a TypeError exception is now
-  raised when attempting to create or set the points/bounds of a
-  :class:`~iris.coords.DimCoord` with arrays containing missing points.
-* The :class:`iris.coord_systems.VerticalPerspective` coordinate system now uses
-  the `CF Vertical perspective definition `_; it had previously,
-  erroneously, been using Geostationary.
-* :class:`~iris.coords.CellMethod` will now only use valid `NetCDF name tokens `_ to reference the coordinates involved in the statistical operation.
-* The following var_name properties will now only allow valid `NetCDF name
-  tokens
-  `_ when
-  referencing the NetCDF variable name. Note that names with a leading
-  underscore are not permitted.
-
-  - :attr:`iris.aux_factory.AuxCoordFactory.var_name`
-  - :attr:`iris.coords.CellMeasure.var_name`
-  - :attr:`iris.coords.Coord.var_name`
-  - :attr:`iris.coords.AuxCoord.var_name`
-  - :attr:`iris.cube.Cube.var_name`
-* Rendering a cube in Jupyter will no longer crash for a cube with
-  attributes containing ``\n``.
-* NetCDF variables which reference themselves in their ``cell_measures``
-  attribute can now be read.
-* :func:`~iris.plot.quiver` now handles circular coordinates.
-* The names of cubes loaded from abf/abl files have been corrected.
-* Fixed a bug in UM file loading, where any landsea-mask-compressed fields
-  (i.e. with LBPACK=x2x) would cause an error later, when realising the data.
-* :meth:`iris.cube.Cube.collapsed` now handles partial collapsing of
-  multidimensional coordinates that have bounds.
-* Fixed a bug in the :data:`~iris.analysis.PROPORTION` aggregator, where cube
-  data in the form of a masked array with ``array.mask=False`` would cause an
-  error, but possibly only later when the values are actually realised.
-  (Note: since netCDF4 version 1.4.0, this is now a common form for data
-  loaded from netCDF files.)
-* Fixed a bug where plotting a cube with a
-  :class:`iris.coord_systems.LambertConformal` coordinate system would result
-  in an error. This would happen if the coordinate system was defined with one
-  standard parallel, rather than two.
-  In these cases, a call to
-  :meth:`~iris.coord_systems.LambertConformal.as_cartopy_crs` would fail.
-* :meth:`iris.cube.Cube.aggregated_by` now gives correct values in points and
-  bounds when handling multidimensional coordinates.
-* Fixed a bug in the :meth:`iris.cube.Cube.collapsed` operation, which caused
-  the unexpected realization of any attached auxiliary coordinates that were
-  *bounded*. It now correctly produces a lazy result and does not realise
-  the original attached AuxCoords, as sketched below.
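The following is a minimal sketch of checking that last fixed behaviour (an editorial illustration, not part of the original release notes; the file name is hypothetical, and any cube loaded lazily from disk will do)::

    import iris
    from iris.analysis import MEAN

    # Loading defers reading the data payload from the (hypothetical) file.
    cube = iris.load_cube('air_temperature.nc')
    print(cube.has_lazy_data())        # True: nothing realised yet

    # Collapsing over 'time' now produces a lazy result, without
    # realising any bounded auxiliary coordinates attached to the cube.
    mean_cube = cube.collapsed('time', MEAN)
    print(mean_cube.has_lazy_data())   # True: the collapse stayed lazy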
-
-
-Documentation Changes
-=====================
-* Added a gallery example showing `how to concatenate NEMO ocean model data
-  <../examples/Oceanography/load_nemo.html>`_.
-* Added an example in the
-  `Loading Iris Cubes: Constraining on Time <../userguide/loading_iris_cubes
-  .html#constraining-on-time>`_
-  Userguide section, demonstrating how to load data within a specified date
-  range.
-* Added notes to the :func:`iris.load` documentation, and the userguide
-  `Loading Iris Cubes <../userguide/loading_iris_cubes.html>`_
-  chapter, emphasizing that the *order* of the cubes returned by an iris load
-  operation is effectively random and unstable, and should not be relied on.
-* Fixed references in the documentation of
-  :func:`iris.util.find_discontiguities` to a nonexistent
-  "mask_discontiguities" routine : these now refer to
-  :func:`~iris.util.mask_cube`.
-
diff --git a/docs/iris/src/whatsnew/aggregate_directory.py b/docs/iris/src/whatsnew/aggregate_directory.py
deleted file mode 100644
index fca098f4d4..0000000000
--- a/docs/iris/src/whatsnew/aggregate_directory.py
+++ /dev/null
@@ -1,322 +0,0 @@
-# (C) British Crown Copyright 2015 - 2019, Met Office
-#
-# This file is part of Iris.
-#
-# Iris is free software: you can redistribute it and/or modify it under
-# the terms of the GNU Lesser General Public License as published by the
-# Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Iris is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with Iris.  If not, see <http://www.gnu.org/licenses/>.
-"""
-Build a release file from files in a contributions directory.
-
-Looks for directories "<...whatsnew>/contributions_<xx.xx>".
-Takes specified "xx.xx" as version, or latest found (alphabetic).
-Writes a file "<...whatsnew>/<xx.xx>.rst".
-
-Valid contributions filenames are of the form:
-    <category>_<isodate>_summary.txt
-Where <summary> can be any valid chars, and
-<category> is one of :
-    "newfeature" "bugfix" "incompatiblechange" "deprecate" "docchange", and
-<isodate> is in the style "2001-Jan-23".
-
-"""
-
-from __future__ import (absolute_import, division, print_function)
-from six.moves import (filter, input, map, range, zip)  # noqa
-import datetime
-from glob import glob
-import os
-import re
-import argparse
-import warnings
-from operator import itemgetter
-from distutils import version
-
-# Regular expressions: CONTRIBUTION_REGEX matches the filenames of
-# contribution snippets. It is split into three sections separated by _
-# 0. String for the category. 1. ISO8601 date. 2. String for the feature name.
-# RELEASE_REGEX matches the directory names, returning the release.
-CONTRIBUTION_REGEX_STRING = r'(?P<category>.*)'
-CONTRIBUTION_REGEX_STRING += r'_(?P<isodate>\d{4}-\w{3}-\d{2})'
-CONTRIBUTION_REGEX_STRING += r'_(?P<name>.*)\.txt$'
-CONTRIBUTION_REGEX = re.compile(CONTRIBUTION_REGEX_STRING)
-RELEASEDIR_PREFIX = r'contributions_'
-_RELEASEDIR_REGEX_STRING = RELEASEDIR_PREFIX + r'(?P<release>.*)$'
-RELEASE_REGEX = re.compile(_RELEASEDIR_REGEX_STRING)
-SOFTWARE_NAME = 'Iris'
-EXTENSION = '.rst'
-VALID_CATEGORIES = [
-    {'Prefix': 'newfeature', 'Title': 'Features'},
-    {'Prefix': 'bugfix', 'Title': 'Bugs Fixed'},
-    {'Prefix': 'incompatiblechange', 'Title': 'Incompatible Changes'},
-    {'Prefix': 'deprecate', 'Title': 'Deprecations'},
-    {'Prefix': 'docchange', 'Title': 'Documentation Changes'}
-]
-VALID_CATEGORY_PREFIXES = [cat['Prefix'] for cat in VALID_CATEGORIES]
-
-
-def _self_root_directory():
-    return os.path.abspath(os.path.dirname(__file__))
-
-
-def _decode_contribution_filename(file_name):
-    file_name_elements = CONTRIBUTION_REGEX.match(file_name)
-    category = file_name_elements.group('category')
-    if category not in VALID_CATEGORY_PREFIXES:
-        # This is an error
-        raise ValueError('Unknown category in contribution filename.')
-    isodate = file_name_elements.group('isodate')
-    date_of_item = datetime.datetime.strptime(isodate, '%Y-%b-%d').date()
-    return category, isodate, date_of_item
-
-
-def is_release_directory(directory_name, release):
-    '''Returns True if a given directory name matches the requested release.'''
-    result = False
-    directory_elements = RELEASE_REGEX.match(directory_name)
-    try:
-        release_string = directory_elements.group('release')
-        directory_release = version.StrictVersion(release_string)
-    except (AttributeError, ValueError):
-        pass
-    else:
-        if directory_release == release:
-            result = True
-    return result
-
-
-def is_compiled_release(root_directory, release):
-    '''Returns True if the requested release.rst file exists.'''
-    result = False
-    compiled_filename = '{!s}{}'.format(release, EXTENSION)
-    compiled_filepath = os.path.join(root_directory, compiled_filename)
-    if os.path.exists(compiled_filepath) and os.path.isfile(compiled_filepath):
-        result = True
-    return result
-
-
-def get_latest_release(root_directory=None):
-    """
-    Implement default=latest release identification.
-
-    Returns a valid release code.
-
-    """
-    if root_directory is None:
-        root_directory = _self_root_directory()
-    directory_contents = os.listdir(root_directory)
-    # Default release to latest visible dir.
-    possible_release_dirs = [releasedir_name
-                             for releasedir_name in directory_contents
-                             if RELEASE_REGEX.match(releasedir_name)]
-    if len(possible_release_dirs) == 0:
-        dirspec = os.path.join(root_directory, RELEASEDIR_PREFIX + '*')
-        msg = 'No valid release directories found, i.e. {!r}.'
-        raise ValueError(msg.format(dirspec))
-    release_dirname = sorted(possible_release_dirs)[-1]
-    release = RELEASE_REGEX.match(release_dirname).group('release')
-    return release
-
-
-def find_release_directory(root_directory, release=None,
-                           fail_on_existing=True):
-    '''
-    Returns the matching contribution directory or raises an exception.
-
-    Defaults to latest-found release (from release directory names).
-    Optionally, fail if the matching release file already exists.
-    *Always* fail if no release directory exists.
-
-    '''
-    if release is None:
-        # Default to latest release.
-        release = get_latest_release(root_directory)
-
-    if fail_on_existing:
-        compiled_release = is_compiled_release(root_directory, release)
-        if compiled_release:
-            msg = ('Specified release {!r} is already compiled : '
-                   '{!r} already exists.')
-            compiled_filename = str(release) + EXTENSION
-            raise ValueError(msg.format(release, compiled_filename))
-
-    directory_contents = os.listdir(root_directory)
-    result = None
-    for inode in directory_contents:
-        node_path = os.path.join(root_directory, inode)
-        if os.path.isdir(node_path):
-            release_directory = is_release_directory(inode, release)
-            if release_directory:
-                result = os.path.join(root_directory, inode)
-                break
-    if not result:
-        msg = 'Contribution folder for release {!s} does not exist : no {!r}.'
-        release_dirname = RELEASEDIR_PREFIX + str(release) + '/'
-        release_dirpath = os.path.join(root_directory, release_dirname)
-        raise ValueError(msg.format(release, release_dirpath))
-    return result
-
-
-def generate_header(release, unreleased=False):
-    '''Return a list of text lines that make up a header for the document.'''
-    if unreleased:
-        isodatestamp = '<unreleased>'
-    else:
-        isodatestamp = datetime.date.today().strftime('%Y-%m-%d')
-    header_text = []
-    title_template = 'What\'s New in {} {!s}\n'
-    title_line = title_template.format(SOFTWARE_NAME, release)
-    title_underline = ('*' * (len(title_line) - 1)) + '\n'
-    header_text.append(title_line)
-    header_text.append(title_underline)
-    header_text.append('\n')
-    header_text.append(':Release: {!s}\n'.format(release))
-    header_text.append(':Date: {}\n'.format(isodatestamp))
-    header_text.append('\n')
-    description_template = 'This document explains the new/changed features '\
-        'of {} in version {!s}\n'
-    header_text.append(description_template.format(SOFTWARE_NAME, release))
-    header_text.append('(:doc:`View all changes <index>`.)')
-    header_text.append('\n')
-    return header_text
-
-
-def read_directory(directory_path):
-    '''Parse the items in a specified directory and return their metadata.'''
-    directory_contents = os.listdir(directory_path)
-    compilable_files_unsorted = []
-    misnamed_files = []
-    for file_name in directory_contents:
-        try:
-            category, isodate, date_of_item = \
-                _decode_contribution_filename(file_name)
-        except (AttributeError, ValueError):
-            misnamed_files.append(file_name)
-            continue
-        compilable_files_unsorted.append({'Category': category,
-                                          'Date': date_of_item,
-                                          'FileName': file_name})
-    compilable_files = sorted(compilable_files_unsorted,
-                              key=itemgetter('Date'),
-                              reverse=True)
-    if misnamed_files:
-        msg = 'Found contribution file(s) with unexpected names :'
-        for filename in misnamed_files:
-            full_path = os.path.join(directory_path, filename)
-            msg += '\n  {}'.format(full_path)
-        warnings.warn(msg, UserWarning)
-
-    return compilable_files
-
-
-def compile_directory(directory, release, unreleased=False):
-    '''Read in source files in date order and compile the text into a list.'''
-    if unreleased:
-        release = '<unreleased>'
-    source_text = read_directory(directory)
-    compiled_text = []
-    header_text = generate_header(release, unreleased)
-    compiled_text.extend(header_text)
-    for count, category in enumerate(VALID_CATEGORIES):
-        category_text = []
-        subtitle_line = ''
-        if count == 0:
-            subtitle_line += '{} {!s} '.format(SOFTWARE_NAME, release)
-        subtitle_line += category['Title'] + '\n'
-        subtitle_underline = ('=' * (len(subtitle_line) - 1)) + '\n'
-        category_text.append('\n')
-        category_text.append(subtitle_line)
-        category_text.append(subtitle_underline)
-        category_items = [item for item in source_text
-                          if item['Category'] == category['Prefix']]
-        if not category_items:
-            continue
-        for file_description in category_items:
-            entry_path = os.path.join(directory, file_description['FileName'])
-            with open(entry_path, 'r') as content_object:
-                text = content_object.readlines()
-                if not text[-1].endswith('\n'):
-                    text[-1] += '\n'
-                category_text.extend(text)
-        category_text.append('\n----\n\n')
-        compiled_text.extend(category_text)
-    return compiled_text
-
-
-def check_all_contributions_valid(release=None, quiet=False, unreleased=False):
-    """Scan the contributions directory for badly-named files."""
-    root_directory = _self_root_directory()
-    # Check there are *some* contributions directory(s), else silently pass.
-    contribs_spec = os.path.join(root_directory, RELEASEDIR_PREFIX + '*')
-    if len(glob(contribs_spec)) > 0:
-        # There are some contributions directories: check latest / specified.
-        if release is None:
-            release = get_latest_release()
-        if not quiet:
-            msg = 'Checking whatsnew contributions for release "{!s}".'
-            print(msg.format(release))
-        release_directory = find_release_directory(root_directory, release,
-                                                   fail_on_existing=False)
-        # Run the directory scan, but convert any warning into an error.
-        with warnings.catch_warnings():
-            warnings.simplefilter('error')
-            compile_directory(release_directory, release, unreleased)
-        if not quiet:
-            print('done.')
-
-
-def run_compilation(release=None, quiet=False, unreleased=False):
-    '''Write a draft release.rst file given a specified uncompiled release.'''
-    if release is None:
-        # This must exist !
-        release = get_latest_release()
-    if not quiet:
-        msg = 'Building release document for release "{!s}".'
-        print(msg.format(release))
-    root_directory = _self_root_directory()
-    release_directory = find_release_directory(root_directory, release)
-    compiled_text = compile_directory(release_directory, release, unreleased)
-    if unreleased:
-        compiled_filename = 'latest' + EXTENSION
-    else:
-        compiled_filename = str(release) + EXTENSION
-    compiled_filepath = os.path.join(root_directory, compiled_filename)
-    with open(compiled_filepath, 'w') as output_object:
-        for string_line in compiled_text:
-            output_object.write(string_line)
-    if not quiet:
-        print('done.')
-
-
-if __name__ == '__main__':
-    PARSER = argparse.ArgumentParser()
-    PARSER.add_argument("release", help="Release number to be compiled",
-                        nargs='?', type=version.StrictVersion)
-    PARSER.add_argument(
-        '-c', '--checkonly', action='store_true',
-        help="Check contribution file names, do not build.")
-    PARSER.add_argument(
-        '-u', '--unreleased', action='store_true',
-        help=("Label the release version as '<unreleased>', "
-              "and its date as '<unreleased>'."))
-    PARSER.add_argument(
-        '-q', '--quiet', action='store_true',
-        help="Do not print progress messages.")
-    ARGUMENTS = PARSER.parse_args()
-    release = ARGUMENTS.release
-    unreleased = ARGUMENTS.unreleased
-    quiet = ARGUMENTS.quiet
-    if ARGUMENTS.checkonly:
-        check_all_contributions_valid(release, quiet=quiet,
-                                      unreleased=unreleased)
-    else:
-        run_compilation(release, quiet=quiet, unreleased=unreleased)
diff --git a/docs/iris/src/whatsnew/contributions_3.0.0/deprecate_2019-Oct-14_remove_deprecated_future_flags.txt b/docs/iris/src/whatsnew/contributions_3.0.0/deprecate_2019-Oct-14_remove_deprecated_future_flags.txt
deleted file mode 100644
index 5bd2903e9b..0000000000
--- a/docs/iris/src/whatsnew/contributions_3.0.0/deprecate_2019-Oct-14_remove_deprecated_future_flags.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-* The deprecated :class:`iris.Future` flags `cell_date_time_objects`,
-  `netcdf_promote`, `netcdf_no_unlimited` and `clip_latitudes` have
-  been removed.
\ No newline at end of file
diff --git a/docs/iris/src/whatsnew/index.rst b/docs/iris/src/whatsnew/index.rst
deleted file mode 100644
index 179216ccb5..0000000000
--- a/docs/iris/src/whatsnew/index.rst
+++ /dev/null
@@ -1,31 +0,0 @@
-.. _iris_whatsnew:
-
-What's new in Iris
-******************
-
-These "What's new" pages describe the important changes between major
-Iris versions.
-
-.. toctree::
-   :maxdepth: 2
-
-   latest.rst
-   3.0.rst
-   2.3.rst
-   2.2.rst
-   2.1.rst
-   2.0.rst
-   1.13.rst
-   1.12.rst
-   1.11.rst
-   1.10.rst
-   1.9.rst
-   1.8.rst
-   1.7.rst
-   1.6.rst
-   1.5.rst
-   1.4.rst
-   1.3.rst
-   1.2.rst
-   1.1.rst
-   1.0.rst
diff --git a/docs/iris/src/whitepapers/change_management.rst b/docs/iris/src/whitepapers/change_management.rst
deleted file mode 100644
index b279c91b96..0000000000
--- a/docs/iris/src/whitepapers/change_management.rst
+++ /dev/null
@@ -1,451 +0,0 @@
-.. _change_management:
-
-Change Management in Iris from the User's perspective
-*****************************************************
-
-As Iris changes, user code will need revising from time to time to keep it
-working, or to maintain best practice. At the very least, you are advised to
-review existing code to ensure it functions correctly with new releases.
-
-Here, we define ways to make this as easy as possible.
-
-.. include:: ../userguide/change_management_goals.txt
-
-
-Key principles you can rely on
-==============================
-
-Iris code editions are published as defined version releases, with a given
-major and minor version number in the version name, "major.minor.xxx",
-as explained in the :ref:`releases section ` below.
-
-  * Code that currently works should **still work**, and have the same
-    results and effects, in any subsequent sub-release with the same major
-    release number.
-
-  * The only time we will make changes that can break existing code is at
-    a **major release**.
-
-  * At a major release, code that works **and emits no deprecation warnings**
-    in the latest previous (minor) release should still work, and have the
-    same results and effects.
-
-
-**What can possibly go wrong ?**
-
-If your code produces :ref:`deprecation warnings `, then it
-*could* behave differently, or fail, at the next major release.
-
-
-
-User Actions : How you should respond to changes and releases
-==============================================================
-
-Checklist :
-
-* when a new **testing or candidate version** is announced
-    if convenient, test your working legacy code against it and report any problems.
-
-* when a new **minor version is released**
-
-    * review the 'What's New' documentation to see if it introduces any
-      deprecations that may affect you.
-    * run your working legacy code and check for any deprecation warnings,
-      indicating that modifications may be necessary at some point
-    * when convenient :
-
-      * review existing code for use of deprecated features
-      * rewrite code to replace deprecated features
-
-* when a new major version is **announced**
-    ensure your code runs, without producing deprecation warnings, in the
-    previous minor release
-
-* when a new major version is **released**
-    check for new deprecation warnings, as for a minor release
-
-
-Details
-=======
-
-The Iris change process aims to minimise the negative effects of change, by
-providing :
-
-  * defined processes for release and change management
-  * release versioning
-  * backwards code compatibility through minor version releases
-  * a way to ensure compatibility with a new major version release
-  * deprecation notices and warnings to highlight all impending changes
-
-Our practices are intended to be compatible with the principles defined in the
-`SemVer project <http://semver.org/>`_.
-
-Key concepts covered here:
-  * :ref:`Release versions `
-  * :ref:`Backwards compatibility `
-  * :ref:`Deprecation `
-
-
-.. _iris_backward_compatibility:
-
-Backwards compatibility
------------------------
-
-"Backwards-compatible" changes are those that leave any existing valid API
-usages unchanged (see :ref:`terminology ` below).
-Minor releases may only include backwards-compatible changes.
-
-The following are examples of backward-compatible changes :
-
-  * changes to documentation
-  * adding to a module : new submodules, functions, classes or properties
-  * adding to a class : new methods or properties
-  * adding to a function or method : new **optional** arguments or keywords
-
-The following are examples of **non-** backward-compatible changes :
-
-  * removing (which includes *renaming*) any public module or submodule
-  * removing any public component : a module, class, method, function or
-    data object property of a public API component
-  * removing any property of a public object
-  * removing an argument or keyword from a method or function
-  * adding a required argument to a method or function
-  * removing a keyword (even one that has no effect)
-  * changing the effect of *any* particular combination of arguments and/or
-    keywords
-
-Note that it is also possible to modify the behaviour of an existing usage by
-making it depend on a newly-defined external control variable. This is
-effectively a change to the 'default behaviour' of a specific usage. Although
-this seems similar to adding a keyword, the cases where the new behaviour
-operates and where it does not are not distinguishable by a different code
-usage, which makes this somewhat dangerous. We do use this type of change,
-but any behaviour 'mode' controls of this sort are usually added as part of the
-:class:`iris.Future` definition.
-See :ref:`Usage of iris.FUTURE `, below.
-
-
-.. _iris_api:
-
-Terminology : API, features, usages and behaviours
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-The API is the components of the iris module and its submodules which are
-"public" : In Python, by convention, this normally means everything whose name
-does not have a single leading underscore "_".
-This applies to all public modules and their properties : submodules, contained
-public classes, data and properties, functions and methods.
-An exception is when a module or class defines an '__all__' property : In
-that case, the public aspects are just the ones listed there.
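As a minimal sketch of this naming convention (an editorial illustration with hypothetical names, not Iris code)::

    # my_module.py
    __all__ = ['useful_function']    # if defined, only these names are public API

    def useful_function():
        """Public : no leading underscore, and listed in __all__."""
        return _internal_helper()

    def _internal_helper():
        # Leading underscore : private by convention, free to change at any release.
        return 42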
-
-Note: these are standard Python conventions, not specific to Iris.
-See: `PEP8 <https://www.python.org/dev/peps/pep-0008/>`_.
-
-The Iris project considers all public API features as "supported", which means
-that we will not change or remove them without deprecation, and will undertake
-to fix any bugs discovered.
-We do however make an important exception for the content of the 'experimental'
-module : APIs in the :mod:`iris.experimental` module are published for initial
-evaluation and feedback, and can be revised or removed without warning at a
-subsequent release.
-
-A "feature" of the API includes public objects as above, but may also be used
-more loosely to indicate a class or mode of behaviour, for example when a
-keyword has a specific value, like "interpolate(mode='linear')".
-
-A "usage" is any code referring to public API elements, for example :
-
-  * `print(iris.thing)`
-  * `iris.submodule.call(arg1)`
-  * `iris.module.call(arg1, arg2, *more_args)`
-  * `iris.module.call(arg1, arg2, control=3)`
-  * `x = iris.module.class(arg, key=4)`
-
-A "behaviour" is whatever Iris does when you invoke a particular API usage,
-encompassing both returned values and any side effects such as code state
-changes or data written to files.
-
-As the above code examples are all public feature usages, they should
-therefore continue to work, with the same behaviours, at least until the next
-**major** version release.
-
-.. Note::
-    What is the "same" behaviour, for backwards-compatibility purposes ?
-
-    Unfortunately, the guarantee to preserve "what Iris does" within a major
-    version cannot ensure *totally* identical and repeatable behaviour for any
-    possible usage, because this can also depend on the exact installed
-    versions of all dependencies (i.e. the other Python modules and system
-    libraries that Iris uses).
-
-    See :ref:`iris_dependency_versions`.
-
-    Minor-release code changes are backwards-compatible, meaning that they must
-    result in "the same" actions from Iris.
-    Ultimately, however, this is only a statement of intent, as we need freedom
-    to modify code with "no change" effects, yet *any* change to Iris code or
-    dependencies could alter total behaviour in some respects :
-    For instance, it could take more or less time or memory, produce results in
-    a different sequence, or perform a calculation slightly differently (with
-    possible small differences in floating point results).
-
-    As behaviour can only be tested in specific ways on a specific
-    installation, so any given user installation may experience changes in
-    behaviour, though hopefully always slight, with a minor release change.
-    This applies to changes in the Iris minor release version, or a different
-    version of some dependency; or other changes to the operating system
-    environment.
-
-
-.. _iris_change_releases:
-
-Releases and Versions
----------------------
-
-
-Iris releases have a unique identifying version string, in the form
-"<major>.<minor>.<micro><modifier>", available to code as
-:data:`iris.__version__` .
-
-This contains major and minor release numbers. The numbering and meaning of
-these are defined, following the `SemVer project <http://semver.org/>`_.
-
-The essential aspects of the "<major>.<minor>.<micro><modifier>" arrangement
-are :
-
-  * "<major>", "<minor>" and "<micro>" are all integers, thus version
-    2.12 is later than 2.2 (i.e. it is "two point twelve", not "two point one
-    two").
-
-  * "<major>.<minor>" denote the software release version.
-
-  * A non-zero "<micro>" denotes a bugfix version, thus a release "X.Y.0" may
-    be followed by "X.Y.1", "X.Y.2" etc, which *only* differ by containing
-    bugfixes. Any bugfix release supersedes its predecessors, and does not
-    change any (valid) APIs or behaviour : hence, it is always advised to
-    replace a given version with its latest bugfix successor, and there
-    should be no reason not to.
-
-  * "<modifier>" is blank for formal releases. It is used to indicate
-    provisional software for testing : The version string in a development
-    code branch is always labelled "-DEV", and release candidates for testing
-    during the release process are labelled "-rc1", "-rc2" etc.
-    For development code, the version number is that of the *next* release,
-    which this code version is progressing towards, e.g. "1.2-DEV" for all
-    code branches since the 1.1 release and intended for release in "1.2".
-
-.. note::
-    Our use of "-" is typical, but does not follow strict SemVer
-    principles.
-
-The code for a specific release is identified by a git tag which is the version
-string : see
-:ref:`Developer's Guide section on releases `.
-
-
-Major and Minor Releases
-^^^^^^^^^^^^^^^^^^^^^^^^
-
-The term "release" refers both to a specific state of the Iris code, which we
-have assigned a given version string, *and* the act of defining it
-(i.e. we "release a release").
-
-According to `SemVer <http://semver.org/>`_ principles, changes that alter the
-behaviour of existing code can only be made at a **major** release, i.e. when
-"X.0" is released following the last previous "(X-1).Y.Z".
-
-*Minor* releases, by contrast, consist of bugfixes, new features, and
-deprecations : Any valid existing code should be unaffected by these, so it
-will still run with the same results.
-
-At a major release, only **deprecated** behaviours and APIs can be changed or
-removed.
-
-
-.. _iris_deprecations:
-
-Deprecations
-------------
-
-A deprecation is issued when we decide that an *existing* feature needs to be
-removed or modified : We add notices to the documentation, and issue a runtime
-"Deprecation Warning" whenever the feature is used.
-
-For a wider perspective, see : `<https://en.wikipedia.org/wiki/Deprecation>`_.
-For the developer view of this, see
-:ref:`Developer's Guide section on deprecations `.
-
-Deprecation manages incompatible changes in a strictly controlled way.
-This allows APIs to evolve to the most effective form, even when that means
-that existing code could behave differently or fail : This is important
-because the freedom to remove features helps prevent the API becoming
-progressively cluttered, and modifying existing behaviours allows us to use
-the most natural code syntax for the most used features.
-
-We can only remove features or change behaviours at a major release. Thus, we
-first deprecate the feature in a minor release, to provide adequate warning
-that existing code may need to be modified.
-
-When we make a release that introduces a deprecation :
-
-  * a deprecation notice appears in the
-    :ref:`What's New section `
-  * deprecation notes are included in all relevant parts of the :ref:`reference
-    documentation `
-  * a runtime warning is produced when the old feature is used or triggered
-
-In most cases, we also provide detailed advice in the documentation and/or
-warning messages on how to replace existing usage with a 'new' way of doing
-things.
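As a rough sketch of that warning mechanism (an editorial illustration; the function names are hypothetical, not actual Iris API)::

    import warnings

    def old_name(cube):
        # Deprecated entry point : warn, then delegate to the replacement,
        # so existing user code keeps working through the transition period.
        warnings.warn('old_name is deprecated : use new_name instead.',
                      DeprecationWarning, stacklevel=2)
        return new_name(cube)

    def new_name(cube):
        # The supported replacement feature.
        return cube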
-In all cases, we must provide a transitional period where both old and new
-features are available :
-
-  * the 'old' style works as it did before
-  * any usage of the 'old' features will emit a
-    :class:`warnings.WarningMessage` message, noting that the feature is
-    deprecated and what to use instead
-  * the 'new' style can be adopted as soon as convenient
-
-This is to warn users :
-
-  * not to use the deprecated features in any new code, *and*
-  * eventually to rewrite old code to use the newer or better alternatives
-
-
-Support for deprecated features through the Release cycle
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-The whole point of a deprecation is that the feature continues to work, but
-with a warning, for some time before an unavoidable change occurs.
-When a version that introduces a deprecation is released, the effects are as
-follows:
-
-  * code that may be affected by the proposed change will result in
-    deprecation warnings
-  * code that currently works will, however, continue to work unchanged, at
-    least until the next major release
-  * you can avoid all deprecation warnings by suitable changes to your code
-  * code which uses no deprecated features, and thus produces no deprecation
-    warnings, will continue to work unchanged even at a **major** release
-  * code that generates deprecation warnings may cease to work at the next
-    **major** release.
-
-
-.. _iris_future_usage:
-
-Future options, `iris.FUTURE`
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-A special approach is needed where the replacement behaviour is not controlled
-by a distinct API usage.
-
-When we extend an API, or add a new feature, we usually add a new method or
-keyword. In those cases, code using the new feature is clearly distinct from
-any previous (valid) usage, so this is relatively simple to manage.
-However, sometimes we really need to change the *way* an API works, without
-modifying or extending (i.e. complicating) the existing user interface.
-In that case, existing user code might sometimes have *different* behaviour
-with the new release, which we obviously need to avoid.
-
-**For example :**
-
-    We might decide there is a more useful way of loading cubes from files of a
-    particular input data format.
-
-    * the user code usage is simply by calls to "iris.load"
-    * the change is not a bugfix, as the old way isn't actually "wrong"
-    * we don't want to add an extra keyword into all the relevant calls
-    * we don't see a longterm future for the existing behaviour : we
-      expect everyone to adopt the new interpretation, eventually
-
-For changes of this sort, the release will define a new boolean property of the
-:data:`iris.FUTURE` object, as a control to select between the 'old' and 'new'
-behaviours, with values False='old' and True='new'.
-See :data:`iris.Future` for examples.
-
-In these cases, as any "deprecated usage" is not clearly distinguishable in the
-form of the user code, it is **especially** important to take note of any
-deprecation messages appearing when legacy code runs.
-
-
-**Sequence of changes to `iris.FUTURE`**
-
-To allow user code to avoid any unexpected behavioural changes, the
-:data:`iris.Future` controls follow a special management cycle, as follows
-(see also the relevant :ref:`Developer Guide section `):
-
-At (minor) release "<X>.<Y>...":
-  * Changes to API:
-    * the new behaviour is made available, alongside the old one
-
-    * a new future option `iris.FUTURE.<option>` is provided to switch
-      between them.
-
-    * the new option defaults to `iris.FUTURE.<option>=False`, meaning
-      the 'old' behaviour is the default.
-
-    * when any relevant API call is made that invokes the old behaviour, a
-      deprecation warning is emitted.
-
-  * User actions:
-
-    * If your code encounters the new deprecation warning, you should try
-      enabling the new control option, and make any necessary rewrites to
-      make it work. This will stop the deprecation warning appearing.
-
-    * If you encounter problems making your code work with the new
-      behaviour, and don't have time to fix them, you should make your
-      code explicitly turn *off* the option for now, i.e. ::
-          `iris.FUTURE.<option> = False`.
-      This locks you into the old behaviour, but your code will continue
-      to work, even beyond the next major release when the default
-      behaviour will change (see on).
-
-At (major) release "<X+1>.0...":
-  * Changes to API:
-    * the control default is changed to `iris.FUTURE.<option>=True`
-
-    * the control property is *itself* deprecated, so that assigning to it
-      now results in a deprecation warning.
-
-    * when any affected API call is made, a deprecation warning is (still)
-      emitted, if the old behaviour is in force. The "old" behaviour is,
-      however, still available and functional.
-
-  * User actions:
-
-    * If your code is already using the "new" behaviour, it will now work
-      without needing to set the Future option. *You should remove* the
-      code which enables the option, as this will now emit a deprecation
-      message. In the *next* major release, this would cause an error.
-
-    * If your code is explicitly turning the option off, it will continue
-      to work in the same way at this point, but obviously time is
-      running out.
-
-    * If your code is still using the old behaviour and *not* setting the
-      control option at all, its behaviour might now have changed
-      unexpectedly and you should review this.
-
-At (major) release "<X+2>...":
-  * Changes to API:
-    * the control property is removed
-    * the "old" behaviour is removed
-
-
-
-.. _iris_dependency_versions:
-
-Versions of Installed Dependencies
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-The dependencies of Iris (required and optional) are defined in
-:ref:`installing_iris`.
-
-Iris does not specify exact required versions for its dependencies, but it may
-specify a minimum acceptable version number. Iris is normally expected to be
-compatible with *any* version up to the latest current release.
-
-When a new release of a dependency is found to cause problems, Iris may define
-the supported version more precisely, but this would be a temporary fix which
-should be removed in a later release.
diff --git a/docs/iris/src/whitepapers/index.rst b/docs/iris/src/whitepapers/index.rst
deleted file mode 100644
index dd0876d257..0000000000
--- a/docs/iris/src/whitepapers/index.rst
+++ /dev/null
@@ -1,12 +0,0 @@
-.. _whitepapers_index:
-
-============================
-Iris technical 'Whitepapers'
-============================
-Extra information on specific technical issues.
-
-.. toctree::
-   :maxdepth: 1
-
-   um_files_loading.rst
-   missing_data_handling.rst
diff --git a/docs/iris/src/IEP/IEP001.adoc b/docs/src/IEP/IEP001.adoc
similarity index 82%
rename from docs/iris/src/IEP/IEP001.adoc
rename to docs/src/IEP/IEP001.adoc
index d38b2e8478..2daef2363a 100644
--- a/docs/iris/src/IEP/IEP001.adoc
+++ b/docs/src/IEP/IEP001.adoc
@@ -119,7 +119,7 @@ cube.sel(height=1.5)
 The semantics of position-based slices will continue to match that of normal Python slices. The start position is included, the end position is excluded.
-Value-based slices will be stricly inclusive, with both the start and end values included. This behaviour differs from normal Python slices but is in common with pandas. +Value-based slices will be strictly inclusive, with both the start and end values included. This behaviour differs from normal Python slices but is in common with pandas. Just as for normal Python slices, we do not need to provide the ability to control the include/exclude behaviour for slicing. @@ -162,12 +162,12 @@ There is a risk that this topic could bog down when dealing with non-standard ca * Boolean array indexing * Lambdas? * What to do about constrained loading? -* Relationship to http://scitools.org.uk/iris/docs/v1.9.2/iris/iris/cube.html#iris.cube.Cube.intersection[iris.cube.Cube.intersection]? +* Relationship to https://scitools.org.uk/iris/docs/v1.9.2/iris/iris/cube.html#iris.cube.Cube.intersection[iris.cube.Cube.intersection]? * Relationship to interpolation (especially nearest-neighbour)? ** e.g. What to do about values that don't exist? *** pandas throws a KeyError -*** xarray supports (several) nearest-neighbour schemes via http://xarray.pydata.org/en/stable/indexing.html#nearest-neighbor-lookups[`data.sel()`] -*** Apparently http://holoviews.org/[holoviews] does nearest-neighbour interpolation. +*** xarray supports (several) nearest-neighbour schemes via https://xarray.pydata.org/en/stable/indexing.html#nearest-neighbor-lookups[`data.sel()`] +*** Apparently https://holoviews.org/[holoviews] does nearest-neighbour interpolation. * multi-dimensional coordinate => unroll? * var_name only selection? `cube.vloc(t0=12)` * Orthogonal only? Or also independent? `cube.loc_points(lon=[1, 1, 5], lat=[31, 33, 32])` @@ -185,9 +185,9 @@ cube.interpolate( ## References . Iris - * http://scitools.org.uk/iris/docs/v1.9.2/iris/iris.html#iris.Constraint[iris.Constraint] - * http://scitools.org.uk/iris/docs/v1.9.2/userguide/subsetting_a_cube.html[Subsetting a cube] -. http://pandas.pydata.org/pandas-docs/stable/indexing.html[pandas indexing] -. http://xarray.pydata.org/en/stable/indexing.html[xarray indexing] -. http://legacy.python.org/dev/peps/pep-0472/[PEP 472 - Support for indexing with keyword arguments] -. http://nbviewer.jupyter.org/gist/rsignell-usgs/13d7ce9d95fddb4983d4cbf98be6c71d[Time slicing NetCDF or OPeNDAP datasets] - Rich Signell's xarray/iris comparison focussing on time handling and performance + * https://scitools.org.uk/iris/docs/v1.9.2/iris/iris.html#iris.Constraint[iris.Constraint] + * https://scitools.org.uk/iris/docs/v1.9.2/userguide/subsetting_a_cube.html[Subsetting a cube] +. https://pandas.pydata.org/pandas-docs/stable/indexing.html[pandas indexing] +. https://xarray.pydata.org/en/stable/indexing.html[xarray indexing] +. https://legacy.python.org/dev/peps/pep-0472/[PEP 472 - Support for indexing with keyword arguments] +. https://nbviewer.jupyter.org/gist/rsignell-usgs/13d7ce9d95fddb4983d4cbf98be6c71d[Time slicing NetCDF or OPeNDAP datasets] - Rich Signell's xarray/iris comparison focussing on time handling and performance diff --git a/docs/src/Makefile b/docs/src/Makefile new file mode 100644 index 0000000000..8d652878f6 --- /dev/null +++ b/docs/src/Makefile @@ -0,0 +1,75 @@ +# Makefile for Sphinx documentation +# + +# You can set these variables from the command line. +SPHINXOPTS = +SPHINXBUILD = sphinx-build +PAPER = +BUILDDIR = _build +SRCDIR = . 
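+# Note: as with any Makefile, the variables above can be overridden per
+# invocation; for example (an illustrative, not project-mandated, usage):
+#   make html SPHINXOPTS="-j auto"
+# which passes sphinx-build's parallel-build option through to the html target.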
+ +# See https://www.sphinx-doc.org/en/master/man/sphinx-build.html?highlight=--keep-going#cmdoption-sphinx-build-W +WARNING_TO_ERROR = -W --keep-going + +# Internal variables. +PAPEROPT_a4 = -D latex_paper_size=a4 +PAPEROPT_letter = -D latex_paper_size=letter +ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . + +.PHONY: help clean html html-noplot html-noapi html-quick dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest show livehtml + +help: + @echo "Please use \`make <target>' where <target> is one of" + @echo " help to view this help" + @echo " html to make standalone HTML files" + @echo " html-noplot to make standalone HTML files, skip gallery" + @echo " html-noapi to make standalone HTML files, skip the API" + @echo " html-quick to make standalone HTML files, skip the gallery and API" + @echo " clean to remove all built files" + @echo " doctest to run all doctests embedded in the documentation (if enabled)" + @echo " linkcheck to check all external links for integrity" + @echo " show to open the built documentation in the default browser" + @echo " livehtml to auto build the docs when any file changes are detected." + @echo " You need to install sphinx-autobuild first:" + @echo " conda install -c conda-forge sphinx-autobuild" + +html: + $(SPHINXBUILD) $(WARNING_TO_ERROR) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html + @echo + @echo "Build finished. The HTML pages are in $(BUILDDIR)/html" + +html-noplot: + $(SPHINXBUILD) -D plot_gallery=0 -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html + @echo + @echo "Build finished. The HTML (no gallery docs) pages are in $(BUILDDIR)/html" + +html-noapi: + export SKIP_API=1; $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html + @echo + @echo "Build finished. The HTML (no api docs) pages are in $(BUILDDIR)/html" + +html-quick: + export SKIP_API=1; $(SPHINXBUILD) -D plot_gallery=0 -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html + @echo + @echo "Build finished. The HTML (no gallery or api docs) pages are in $(BUILDDIR)/html" + +clean: + -rm -rf $(BUILDDIR) + -rm -rf $(SRCDIR)/generated + +doctest: + $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest + @echo "Testing of doctests in the sources finished, look at the " + @echo "results in $(BUILDDIR)/doctest/output.txt." + +linkcheck: + $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck + @echo + @echo "Link check complete; look for any errors in the above output " + @echo "or in $(BUILDDIR)/linkcheck/output.txt." + +show: + @python -c "import webbrowser; webbrowser.open_new_tab('file://$(shell pwd)/$(BUILDDIR)/html/index.html')" + +livehtml: + sphinx-autobuild "$(SRCDIR)" "$(BUILDDIR)" $(SPHINXOPTS) --ignore generated $(O) \ No newline at end of file diff --git a/docs/src/_static/README.md b/docs/src/_static/README.md new file mode 100644 index 0000000000..b9f2877a30 --- /dev/null +++ b/docs/src/_static/README.md @@ -0,0 +1,31 @@ +# Iris logos + +[![iris-logo-title.svg](iris-logo-title.svg)](iris-logo-title.svg) + +Code for generating the logos is at: +[SciTools/marketing/iris/logo/generate_logo.py](https://github.com/SciTools/marketing/blob/master/iris/logo/generate_logo.py) + +See the docstring of the `generate_logo()` function for more information. + +## Why a scripted logo?
+ +SVG logos are ideal for source-controlled projects: + +* Low file size, with infinitely scaling quality +* Universally recognised vector format, editable by many software packages +* XML-style content = human-readable diff when changes are made + +But Iris' logo is difficult to reproduce/edit using an SVG editor alone: + +* Includes correctly projected, low resolution coastlines +* Needs precise alignment of the 'visual centre' of the iris with the centres + of the Earth and the image + +An SVG image is simply XML format, so can be easily assembled automatically +with a script, which can also be engineered to address the above problems. + +Further advantages of using a script: + +* Parameterised text, making it easy to standardise the logo across all Iris + packages +* Can generate an animated GIF/SVG of a rotating Earth diff --git a/docs/src/_static/icon_api.svg b/docs/src/_static/icon_api.svg new file mode 100644 index 0000000000..bf2f8d67bb --- /dev/null +++ b/docs/src/_static/icon_api.svg @@ -0,0 +1,155 @@ [SVG markup not recoverable from extraction] \ No newline at end of file diff --git a/docs/src/_static/icon_development.svg b/docs/src/_static/icon_development.svg new file mode 100644 index 0000000000..dbc342688c --- /dev/null +++ b/docs/src/_static/icon_development.svg @@ -0,0 +1,63 @@ [SVG markup not recoverable from extraction] diff --git a/docs/src/_static/icon_instructions.svg b/docs/src/_static/icon_instructions.svg new file mode 100644 index 0000000000..62b3fc3620 --- /dev/null +++ b/docs/src/_static/icon_instructions.svg @@ -0,0 +1,162 @@ [SVG markup not recoverable from extraction] \ No newline at end of file diff --git a/docs/src/_static/icon_new_product.svg b/docs/src/_static/icon_new_product.svg new file mode 100644 index 0000000000..f222e1e066 --- /dev/null +++ b/docs/src/_static/icon_new_product.svg @@ -0,0 +1,182 @@ [SVG markup not recoverable from extraction] diff --git a/docs/src/_static/icon_shuttle.svg b/docs/src/_static/icon_shuttle.svg new file mode 100644 index 0000000000..46ba64d2e0 --- /dev/null +++ b/docs/src/_static/icon_shuttle.svg @@ -0,0 +1,71 @@ [SVG markup not recoverable from extraction] diff --git a/docs/src/_static/icon_support.png b/docs/src/_static/icon_support.png new file mode 100644 index 0000000000..567cdb1b2f Binary files /dev/null and b/docs/src/_static/icon_support.png differ diff --git a/docs/src/_static/icon_thumb.png b/docs/src/_static/icon_thumb.png new file mode 100644 index 0000000000..6a14875e22 Binary files /dev/null and b/docs/src/_static/icon_thumb.png differ diff --git a/docs/src/_static/iris-logo-title-dark.svg b/docs/src/_static/iris-logo-title-dark.svg new file mode 100644 index 0000000000..b7358aafec --- /dev/null +++ b/docs/src/_static/iris-logo-title-dark.svg @@ -0,0 +1,107 @@ [SVG markup not recoverable from extraction] \ No newline at end of file diff --git a/docs/src/_static/iris-logo-title.svg b/docs/src/_static/iris-logo-title.svg new file mode 100644 index 0000000000..98dd1a73d5 --- /dev/null +++ b/docs/src/_static/iris-logo-title.svg @@ -0,0 +1,107 @@ [SVG markup not recoverable from extraction] \ No newline at end of file diff --git a/docs/src/_static/iris-logo.svg b/docs/src/_static/iris-logo.svg new file mode 100644 index 0000000000..fe49411b45 --- /dev/null +++ b/docs/src/_static/iris-logo.svg @@ -0,0 +1,104 @@ [SVG markup not recoverable from extraction] \ No newline at end of file diff --git a/docs/src/_static/theme_override.css
b/docs/src/_static/theme_override.css new file mode 100644 index 0000000000..355119f8a5 --- /dev/null +++ b/docs/src/_static/theme_override.css @@ -0,0 +1,28 @@ +/* import the standard theme css */ +@import url("styles/theme.css"); + +/* now we can add custom css.... */ + +/* Used for very strong warning */ +#slim-red-box-banner { + background: #ff0000; + box-sizing: border-box; + color: #ffffff; + font-weight: normal; + padding: 0.5em; +} + +#slim-red-box-banner a { + color: #ffffff; + font-weight: normal; + text-decoration: underline; +} + +/* bullet point list with green ticks */ +ul.squarelist { + /* https://developer.mozilla.org/en-US/docs/Web/CSS/list-style-type */ + list-style-type: "\2705"; + margin-left: 0; + text-indent: 1em; + padding-left: 5em; +} diff --git a/docs/src/_templates/custom_footer.html b/docs/src/_templates/custom_footer.html new file mode 100644 index 0000000000..f81fcc583e --- /dev/null +++ b/docs/src/_templates/custom_footer.html @@ -0,0 +1 @@ +

Built using Python {{ python_version }}. [surrounding footer markup stripped in extraction] diff --git a/docs/src/_templates/custom_sidebar_logo_version.html b/docs/src/_templates/custom_sidebar_logo_version.html new file mode 100644 index 0000000000..c9d9ac6e2e --- /dev/null +++ b/docs/src/_templates/custom_sidebar_logo_version.html @@ -0,0 +1,26 @@ +{% if on_rtd %} + {% if rtd_version == 'latest' %} + [version badge markup stripped] + {% elif rtd_version == 'stable' %} + [version badge markup stripped] + {% elif rtd_version_type == 'tag' %} + {# Covers builds for specific tags, including RC's. #} + [version badge markup stripped] + {% else %} + {# Anything else built by RTD will be the HEAD of an activated branch #} + [version badge markup stripped] + {% endif %} +{%- else %} + {# not on rtd #} + [version badge markup stripped] +{%- endif %} diff --git a/docs/src/_templates/imagehash.html b/docs/src/_templates/imagehash.html new file mode 100644 index 0000000000..8b0dac0cce --- /dev/null +++ b/docs/src/_templates/imagehash.html @@ -0,0 +1,15 @@ +{% extends "!layout.html" %} + +{% block body %} + [table header markup stripped] Test: {{ test }} + +{% for hash, file in hashfiles %} + [table row markup stripped] {{hash}}
    +{% endfor %} + +{% endblock %} \ No newline at end of file diff --git a/docs/src/common_links.inc b/docs/src/common_links.inc new file mode 100644 index 0000000000..a49a98bfa6 --- /dev/null +++ b/docs/src/common_links.inc @@ -0,0 +1,84 @@ +.. comment + Common resources in alphabetical order: + +.. _black: https://black.readthedocs.io/en/stable/ +.. _cartopy: https://github.com/SciTools/cartopy +.. _flake8: https://flake8.pycqa.org/en/stable/ +.. _.flake8.yml: https://github.com/SciTools/iris/blob/main/.flake8 +.. _cirrus-ci: https://cirrus-ci.com/github/SciTools/iris +.. _codespell: https://github.com/codespell-project/codespell +.. _conda: https://docs.conda.io/en/latest/ +.. _contributor: https://github.com/SciTools/iris/graphs/contributors +.. _core developers: https://github.com/orgs/SciTools/teams/iris-devs/members +.. _generating ssh keys for GitHub: https://docs.github.com/en/github/authenticating-to-github/adding-a-new-ssh-key-to-your-github-account +.. _GitHub Actions: https://docs.github.com/en/actions +.. _GitHub Help Documentation: https://docs.github.com/en/github +.. _GitHub Discussions: https://github.com/SciTools/iris/discussions +.. _Iris: https://github.com/SciTools/iris +.. _Iris GitHub: https://github.com/SciTools/iris +.. _Iris GitHub Actions: https://github.com/SciTools/iris/actions +.. _iris-sample-data: https://github.com/SciTools/iris-sample-data +.. _iris-test-data: https://github.com/SciTools/iris-test-data +.. _isort: https://pycqa.github.io/isort/ +.. _issue: https://github.com/SciTools/iris/issues +.. _issues: https://github.com/SciTools/iris/issues +.. _legacy documentation: https://github.com/SciTools/scitools.org.uk/tree/master/iris/docs/archive +.. _matplotlib: https://matplotlib.org/stable/ +.. _napolean: https://sphinxcontrib-napoleon.readthedocs.io/en/latest/sphinxcontrib.napoleon.html +.. _nox: https://nox.thea.codes/en/stable/ +.. _New Issue: https://github.com/scitools/iris/issues/new/choose +.. _pre-commit: https://pre-commit.com/ +.. _pull request: https://github.com/SciTools/iris/pulls +.. _pull requests: https://github.com/SciTools/iris/pulls +.. _Read the Docs: https://scitools-iris.readthedocs.io/en/latest/ +.. _readthedocs.yml: https://github.com/SciTools/iris/blob/main/requirements/readthedocs.yml +.. _ruff: https://github.com/astral-sh/ruff +.. _SciTools: https://github.com/SciTools +.. _scitools-iris: https://pypi.org/project/scitools-iris/ +.. _Shapely: https://shapely.readthedocs.io/en/stable/index.html +.. _sphinx: https://www.sphinx-doc.org/en/master/ +.. _sphinx-apidoc: https://github.com/sphinx-contrib/apidoc +.. _test-iris-imagehash: https://github.com/SciTools/test-iris-imagehash +.. _using git: https://docs.github.com/en/github/using-git +.. _requirements: https://github.com/SciTools/iris/tree/main/requirements +.. _CF-UGRID: https://ugrid-conventions.github.io/ugrid-conventions/ +.. _issues on GitHub: https://github.com/SciTools/iris/issues?q=is%3Aopen+is%3Aissue+sort%3Areactions-%2B1-desc +.. _python-stratify: https://github.com/SciTools/python-stratify +.. _iris-esmf-regrid: https://github.com/SciTools-incubator/iris-esmf-regrid +.. _netCDF4: https://github.com/Unidata/netcdf4-python +.. _SciTools Contributor's License Agreement (CLA): https://cla-assistant.io/SciTools/ + + +.. comment + Core developers and prolific contributors (@github names) in alphabetical order: + +.. _@abooton: https://github.com/abooton +.. _@alastair-gemmell: https://github.com/alastair-gemmell +.. _@ajdawson: https://github.com/ajdawson +.. 
_@bjlittle: https://github.com/bjlittle +.. _@bouweandela: https://github.com/bouweandela +.. _@bsherratt: https://github.com/bsherratt +.. _@corinnebosley: https://github.com/corinnebosley +.. _@cpelley: https://github.com/cpelley +.. _@djkirkham: https://github.com/djkirkham +.. _@DPeterK: https://github.com/DPeterK +.. _@ESadek-MO: https://github.com/ESadek-MO +.. _@esc24: https://github.com/esc24 +.. _@HGWright: https://github.com/HGWright +.. _@jamesp: https://github.com/jamesp +.. _@jonseddon: https://github.com/jonseddon +.. _@jvegasbsc: https://github.com/jvegasbsc +.. _@lbdreyer: https://github.com/lbdreyer +.. _@marqh: https://github.com/marqh +.. _@pelson: https://github.com/pelson +.. _@pp-mo: https://github.com/pp-mo +.. _@QuLogic: https://github.com/QuLogic +.. _@rcomer: https://github.com/rcomer +.. _@rhattersley: https://github.com/rhattersley +.. _@schlunma: https://github.com/schlunma +.. _@stephenworsley: https://github.com/stephenworsley +.. _@tkknight: https://github.com/tkknight +.. _@trexfeathers: https://github.com/trexfeathers +.. _@ukmo-ccbunney: https://github.com/ukmo-ccbunney +.. _@wjbenfold: https://github.com/wjbenfold +.. _@zklaus: https://github.com/zklaus diff --git a/docs/src/community/index.rst b/docs/src/community/index.rst new file mode 100644 index 0000000000..ee227513b3 --- /dev/null +++ b/docs/src/community/index.rst @@ -0,0 +1,60 @@ +.. include:: ../common_links.inc + +.. todo: + consider scientific-python.org + consider scientific-python.org/specs/ + +Iris in the Community +===================== + +Iris aims to be a valuable member of the open source scientific Python +community. + +We listen out for developments in our dependencies and neighbouring projects, +and we reach out to them when we can solve problems together; please feel free +to reach out to us! + +We are aware of our place in the user's wider 'toolbox' - offering unique +functionality and interoperating smoothly with other packages. + +We welcome contributions from all; whether that's an opinion, a 1-line +clarification, or a whole new feature 🙂 + +Quick Links +----------- + +* `GitHub Discussions`_ +* :ref:`Getting involved` +* `Twitter `_ + +Interoperability +---------------- + +There's a big choice of Python tools out there! Each one has strengths and +weaknesses in different areas, so we don't want to force a single choice for your +whole workflow - we'd much rather make it easy for you to choose the right tool +for the moment, switching whenever you need. Below are our ongoing efforts at +smoother interoperability: + +.. not using toctree due to combination of child pages and cross-references. + +* The :mod:`iris.pandas` module +* :doc:`iris_xarray` +* :doc:`phrasebook` + +.. toctree:: + :maxdepth: 1 + :hidden: + + iris_xarray + phrasebook + +Plugins +------- + +Iris can be extended with **plugins**! See below for further information: + +.. toctree:: + :maxdepth: 2 + + plugins diff --git a/docs/src/community/iris_xarray.rst b/docs/src/community/iris_xarray.rst new file mode 100644 index 0000000000..71585d8b9f --- /dev/null +++ b/docs/src/community/iris_xarray.rst @@ -0,0 +1,200 @@ +.. include:: ../common_links.inc + +====================== +Iris ❤️ :term:`Xarray` +====================== + +There is a lot of overlap between Iris and :term:`Xarray`, but some important +differences too. Below is a summary of the most important differences, so that +you can be prepared, and to help you choose the best package for your use case. 
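+As a first taste of that interoperability, the sketch below round-trips an
+Iris cube through Xarray using Xarray's own conversion methods (discussed
+further under Conversion, below); the sample filename is purely illustrative:
+
+.. code-block:: python
+
+    import iris
+    import xarray as xr
+
+    # Any file that loads as a single cube (name assumed for illustration).
+    cube = iris.load_cube("air_temperature.nc")
+
+    da = xr.DataArray.from_iris(cube)  # Cube -> DataArray
+    cube2 = da.to_iris()               # ... and back again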
+See :doc:`phrasebook` for a broad comparison of terminology. + +Overall Experience +------------------ + +Iris is the more specialised package, focused on making it as easy +as possible to work with meteorological and climatological data. Iris +is built to natively handle many key concepts, such as the CF conventions, +coordinate systems and bounded coordinates. Iris offers a smaller toolkit of +operations compared to Xarray, particularly around APIs for sophisticated +computation such as array manipulation and multi-processing. + +Xarray's more generic data model and community-driven development give it a +richer range of operations and broader possible uses. Using Xarray +specifically for meteorology/climatology may require deeper knowledge +compared to using Iris, and you may prefer to add Xarray plugins +such as :ref:`cfxarray` to get the best experience. Advanced users can likely +achieve better performance with Xarray than with Iris. + +Conversion +---------- +There are multiple ways to convert between Iris and Xarray objects. + +* Xarray includes the :meth:`~xarray.DataArray.to_iris` and + :meth:`~xarray.DataArray.from_iris` methods - detailed in the + `Xarray IO notes on Iris`_. Since Iris evolves independently of Xarray, be + vigilant for concepts that may be lost during the conversion. +* Because both packages are closely linked to the :term:`NetCDF Format`, it is + feasible to save a NetCDF file using one package then load that file using + the other package. This will be lossy in places, as both Iris and Xarray + are opinionated on how certain NetCDF concepts relate to their data models. +* `ncdata `_ is a package which the Iris development team have developed to + manage netcdf data; it can act as an improved 'bridge' between Iris and + Xarray: + +Ncdata can convert Iris cubes to an Xarray dataset, or vice versa, with minimal +overhead and as losslessly as possible. + +For example: + +.. code-block:: python + + from ncdata.iris_xarray import cubes_from_xarray, cubes_to_xarray + cubes = cubes_from_xarray(dataset) + xrds = cubes_to_xarray(cubes) + +Ncdata avoids the feature limitations previously mentioned regarding Xarray's +:meth:`~xarray.DataArray.to_iris` and :meth:`~xarray.DataArray.from_iris`, +because it doesn't replicate any logic of either Xarray or Iris. +Instead, it uses the netcdf file interfaces of both to exchange data +"as if" via a netcdf file. So, these conversions *behave* just like exchanging data +via a file, but are far more efficient because they can transfer data without copying +arrays or fetching lazy data. + +Regridding +---------- +Iris and Xarray offer a range of regridding methods - both natively and via +additional packages such as `iris-esmf-regrid`_ and `xESMF`_ - which overlap +in places +but tend to cover a different set of use cases (e.g. Iris handles unstructured +meshes but offers access to fewer ESMF methods). The behaviour of these +regridders also differs slightly (even between different regridders attached to +the same package), so the appropriate package to use depends highly on the +particulars of the use case. + +Plotting +-------- +Xarray and Iris have a large overlap of functionality when creating +:term:`Matplotlib` plots and both support the plotting of multidimensional +coordinates. This means the experience is largely similar using either package. + +Xarray supports further plotting backends through external packages (e.g.
Bokeh through `hvPlot`_) +and, if a user is already familiar with `pandas`_, the interface should feel +familiar. It also supports some different plot types from Iris, and therefore can +be used for a wider variety of plots. It also has benefits for quick, "out of +the box" customisations to plots. However, further customisation still +requires knowledge of matplotlib. + +In both cases, :term:`Cartopy` is/can be used. Iris does more work +automatically for the user here, creating Cartopy +:class:`~cartopy.mpl.geoaxes.GeoAxes` for latitude and longitude coordinates, +whereas the user has to do this manually in Xarray. + +Statistics +---------- +Both libraries are quite comparable with generally similar capabilities, +performance and laziness. Iris offers more specificity in some cases, such as +some more specific unique functions and masked tolerance in most statistics. +Xarray seems more approachable, however, with some less unique but more +convenient solutions (these tend to be wrappers to :term:`Dask` functions). + +Laziness and Multi-Processing with :term:`Dask` +----------------------------------------------- +Iris and Xarray both support lazy data and out-of-core processing through +utilisation of Dask. + +While both Iris and Xarray expose :term:`NumPy` conveniences at the API level +(e.g. the `ndim` attribute), only Xarray exposes Dask conveniences. For example +:attr:`xarray.DataArray.chunks`, which gives the user direct control +over the underlying Dask array chunks. The Iris API instead takes control of +such concepts and user control is only possible by manipulating the underlying +Dask array directly (accessed via :meth:`iris.cube.Cube.core_data`). + +:class:`xarray.DataArray`\ s comply with `NEP-18`_, allowing NumPy arrays to be +based on them, and they also include the necessary extra members for Dask +arrays to be based on them too. Neither of these is currently possible with +Iris :class:`~iris.cube.Cube`\ s, although this is an ambition for the future. + +NetCDF File Control +------------------- +(More info: :ref:`netcdf_io`) + +Unlike Iris, Xarray generally provides full control of major file structures, +i.e. dimensions + variables, including their order in the file. It mostly +respects these in a file input, and can reproduce them on output. +However, attribute handling is not so complete: like Iris, it interprets and +modifies some recognised aspects, and can add some extra attributes not in the +input. + +Whereas Iris is primarily designed to handle netCDF data encoded according to +`CF Conventions `_, this is not so important to Xarray, +which can therefore make it harder to manage this type of data correctly. +While Xarray CF support is not complete, it may improve, and obviously +:ref:`cfxarray` may be relevant here. +There is also relevant documentation +`at this page `_. + +In some particular aspects, CF data is not loaded well (or at all), and in many cases +output is not fully CF compliant (as per `the cf checker `_). + +* xarray has its own interpretation of coordinates, which is different from the CF-based + approach in Iris, and means that the use of the "coordinates" attribute in output is + often not CF compliant. +* dates are converted to datetime-like objects internally. There are special features + providing `support for non-standard calendars `_; + however, date units may not always be saved correctly. +* CF-style coordinate bounds variables are not fully understood.
The CF approach + where bounds variables do not usually define their units or standard_names can cause + problems. Certain files containing bounds variables with more than 2 bounds (e.g. + unstructured data) may not load at all. +* missing points are always represented as NaNs, as per Pandas usage. + (See :ref:`xarray_missing_data`.) + This means that fill values are not preserved, and that masked integer data is + converted to floats. + The netCDF default fill-values are not supported, so that variables with no + "_FillValue" attribute will have missing points equal to the fill-value + in place of NaNs. By default, output variables generally have ``_FillValue = NaN``. + +Ultimately, however, nearly everything wanted in a desired result file +**can** be achieved in Xarray, via provided override mechanisms (`loading keywords`_ +and the '`encoding`_' dictionaries). + +.. _xarray_missing_data: + +Missing Data +------------ +Xarray uses :data:`numpy.nan` to represent missing values and this will support +many simple use cases assuming the data are floats. Iris enables more +sophisticated missing data handling by representing missing values as masks +(:class:`numpy.ma.MaskedArray` for real data and :class:`dask.array.Array` +for lazy data) which allows data to be any data type and to include either/both +a mask and :data:`~numpy.nan`\ s. + +.. _cfxarray: + +`cf-xarray`_ +------------- +Iris has a data model entirely based on :term:`CF Conventions`. Xarray has a +data model based on :term:`NetCDF Format`, with cf-xarray acting as a translation +layer into CF. Xarray/cf-xarray methods can be +called and data accessed with CF-like arguments (e.g. axis, standard name) and +there are some CF-specific utilities (similar +to Iris utilities). Iris tends to cover more of CF, and to be stricter about it. + + +.. seealso:: + + * `Xarray IO notes on Iris`_ + * `Xarray notes on other NetCDF libraries`_ + +.. _Xarray IO notes on Iris: https://docs.xarray.dev/en/stable/user-guide/io.html#iris +.. _Xarray notes on other NetCDF libraries: https://docs.xarray.dev/en/stable/getting-started-guide/faq.html#what-other-netcdf-related-python-libraries-should-i-know-about +.. _loading keywords: https://docs.xarray.dev/en/stable/generated/xarray.open_dataset.html#xarray.open_dataset +.. _encoding: https://docs.xarray.dev/en/stable/user-guide/io.html#writing-encoded-data +.. _xESMF: https://github.com/pangeo-data/xESMF/ +.. _seaborn: https://seaborn.pydata.org/ +.. _hvPlot: https://hvplot.holoviz.org/ +.. _pandas: https://pandas.pydata.org/ +.. _NEP-18: https://numpy.org/neps/nep-0018-array-function-protocol.html +.. _cf-xarray: https://github.com/xarray-contrib/cf-xarray +.. _iris#4994: https://github.com/SciTools/iris/issues/4994 diff --git a/docs/src/community/phrasebook.rst b/docs/src/community/phrasebook.rst new file mode 100644 index 0000000000..bcd91cca83 --- /dev/null +++ b/docs/src/community/phrasebook.rst @@ -0,0 +1,66 @@ +.. include:: ../common_links.inc + +.. _phrasebook: + +Package Phrasebook +=================== + +There are a number of similar packages to Iris, and a lot of these have their own +terminology for similar things. Whether you're coming or going, we hope this might +be a helpful guide to these differences! +Definitions for each can be found in :ref:`glossary`. See also +`Xarray terminology `_. + +.. list-table:: Phrasebook + :widths: 25 25 25 50 + :header-rows: 1 + + * - Iris + - Xarray + - Example + - Notes + * - Non-Lazy + - Eager + - + - Used to relate to functions, rather than the data.
+ * - Cube + - DataArray + - + - + * - CubeList + - Dataset + - + - Though similar, a CubeList is a simpler object, and is + not a perfect comparison to a Dataset + * - Merge/ Concatenate + - Concatenate + - `Xarray concatenate `_ + - Xarray's concatenate can largely do what both + Iris merge and Iris concatenate do. However, this is not a perfect comparison; + please see the link for more information. + * - + - Merge + - `Xarray merge `_ + - Xarray's Merge function doesn't map neatly to any Iris feature. + Please see the link for more information. + * - Scalar Coordinate + - + - + - Iris makes a distinction between scalar coordinates and non-scalar coordinates, + whereas xarray documentation makes a distinction between scalar and non-scalar *data*. + It is possible to make coordinates with scalar data in both Iris and xarray, + but only Iris will label such coordinates. + * - AuxCoord + - Non-Dimensional Coordinate + - + - Coordinates in Iris and xarray are categorised using different rules, + and so are not a one-to-one match. + * - DimCoord + - Dimension Coordinate + - + - Coordinates in Iris and xarray are categorised using different rules, + and so are not a one-to-one match. + +---- + +`To top `_ \ No newline at end of file diff --git a/docs/src/community/plugins.rst b/docs/src/community/plugins.rst new file mode 100644 index 0000000000..0d79d64623 --- /dev/null +++ b/docs/src/community/plugins.rst @@ -0,0 +1,68 @@ +.. _namespace package: https://packaging.python.org/en/latest/guides/packaging-namespace-packages/ + +.. _community_plugins: + +Plugins +======= + +Iris supports **plugins** under the ``iris.plugins`` `namespace package`_. +This allows packages that extend Iris' functionality to be developed and +maintained independently, while still being installed into ``iris.plugins`` +instead of a separate package. For example, a plugin may provide loaders or +savers for additional file formats, or alternative visualisation methods. + + +Using plugins +------------- + +Once a plugin is installed, it can be used either via the +:func:`iris.use_plugin` function, or by importing it directly: + +.. code-block:: python + + import iris + + iris.use_plugin("my_plugin") + # OR + import iris.plugins.my_plugin + + +Creating plugins +---------------- + +The choice of a `namespace package`_ makes writing a plugin relatively +straightforward: it simply needs to appear as a folder within ``iris/plugins``, +and can then be distributed in the same way as any other package. An example +repository layout: + +.. code-block:: text + + + lib + + iris + + plugins + + my_plugin + - __init__.py + - (more code...) + - README.md + - pyproject.toml + - setup.cfg + - (other project files...) + +In particular, note that there must **not** be any ``__init__.py`` files at +higher levels than the plugin itself. + +The package name - how it is referred to by PyPI/conda, specified by +``metadata.name`` in ``setup.cfg`` - is recommended to include both "iris" and +the plugin name. Continuing this example, its ``setup.cfg`` should include, at +minimum: + +.. code-block:: ini + + [metadata] + name = iris-my-plugin + + [options] + packages = find_namespace: + + [options.packages.find] + where = lib diff --git a/docs/src/conf.py b/docs/src/conf.py new file mode 100644 index 0000000000..70b1063585 --- /dev/null +++ b/docs/src/conf.py @@ -0,0 +1,462 @@ +# Copyright Iris contributors +# +# This file is part of Iris and is released under the BSD license.
+# See LICENSE in the root of the repository for full licensing details. + +# -*- coding: utf-8 -*- +# +# Iris documentation build configuration file, created by +# sphinx-quickstart on Tue May 25 13:26:23 2010. +# +# This file is execfile()d with the current directory set to its containing dir. +# +# Note that not all possible configuration values are present in this +# autogenerated file. +# +# All configuration values have a default; values that are commented out +# serve to show the default. +# ---------------------------------------------------------------------------- + +"""Config for sphinx.""" + +import datetime +from importlib.metadata import version as get_version +from inspect import getsource +import ntpath +import os +from pathlib import Path +import re +from subprocess import run +import sys +from tempfile import gettempdir +from urllib.parse import quote +import warnings + + +# function to write useful output to stdout, prefixing the source. +def autolog(message): + print("[{}] {}".format(ntpath.basename(__file__), message)) + + +# -- Check for dev make options to build quicker +skip_api = os.environ.get("SKIP_API") + +# -- Are we running on the readthedocs server, if so do some setup ----------- +on_rtd = os.environ.get("READTHEDOCS") == "True" + +# This is the rtd reference to the version, such as: latest, stable, v3.0.1 etc +rtd_version = os.environ.get("READTHEDOCS_VERSION") +if rtd_version is not None: + # Make rtd_version safe for use in shields.io badges. + rtd_version = rtd_version.replace("_", "__") + rtd_version = rtd_version.replace("-", "--") + rtd_version = quote(rtd_version) + +# branch, tag, external (for pull request builds), or unknown. +rtd_version_type = os.environ.get("READTHEDOCS_VERSION_TYPE") + +# For local testing purposes we can force being on RTD and the version +# on_rtd = True # useful for testing +# rtd_version = "latest" # useful for testing +# rtd_version = "stable" # useful for testing +# rtd_version_type = "tag" # useful for testing +# rtd_version = "my_branch" # useful for testing + +if on_rtd: + autolog("Build running on READTHEDOCS server") + + # list all the READTHEDOCS environment variables that may be of use + autolog("Listing all environment variables on the READTHEDOCS server...") + + for item, value in os.environ.items(): + autolog("[READTHEDOCS] {} = {}".format(item, value)) + +# -- Path setup -------------------------------------------------------------- + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. + +# custom sphinx extensions +sys.path.append(os.path.abspath("sphinxext")) + +# add some sample files from the developers guide.. +sys.path.append(os.path.abspath(os.path.join("developers_guide"))) + +# why isn't the iris path added so it is discoverable too? We don't need to: +# the sphinxext that generates the api rst knows where the source is. If it +# is added then the travis build will likely fail. + +# -- Project information ----------------------------------------------------- + +project = "Iris" + +# define the copyright information for latex builds.
Note, for html builds, +# the copyright exists directly inside "_templates/layout.html" +copyright_years = f"2010 - {datetime.datetime.now().year}" +copyright = f"{copyright_years}, Iris Contributors" +author = "Iris Developers" + +# The version info for the project you're documenting, acts as replacement for +# |version|, also used in various other places throughout the built documents. +version = get_version("scitools-iris") +release = version +autolog(f"Iris Version = {version}") +autolog(f"Iris Release = {release}") + +# -- General configuration --------------------------------------------------- + +# Create a variable that can be inserted in the rst "|copyright_years|". +# You can add more variables here if needed. + +build_python_version = ".".join([str(i) for i in sys.version_info[:3]]) + + +def _dotv(version): + result = version + match = re.match(r"^py(\d+)$", version) + if match: + digits = match.group(1) + if len(digits) > 1: + result = f"{digits[0]}.{digits[1:]}" + return result + + +# Automate the discovery of the python versions tested with CI. +python_support = sorted( + [fname.stem for fname in Path(".").glob("../../requirements/py*.yml")] +) + +if not python_support: + python_support = "unknown Python versions" +elif len(python_support) == 1: + python_support = f"Python {_dotv(python_support[0])}" +else: + rest = ", ".join([_dotv(v) for v in python_support[:-1]]) + last = _dotv(python_support[-1]) + python_support = f"Python {rest} and {last}" + +rst_epilog = f""" +.. |copyright_years| replace:: {copyright_years} +.. |python_version| replace:: {build_python_version} +.. |python_support| replace:: {python_support} +.. |iris_version| replace:: v{version} +.. |build_date| replace:: ({datetime.datetime.now().strftime('%d %b %Y')}) +""" + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named "sphinx.ext.*") or your custom +# ones. +extensions = [ + "sphinx.ext.todo", + "sphinx.ext.duration", + "sphinx.ext.coverage", + "sphinx.ext.viewcode", + "sphinx.ext.autosummary", + "sphinx.ext.doctest", + "sphinx.ext.extlinks", + "sphinx.ext.autodoc", + "sphinx.ext.intersphinx", + "sphinx_copybutton", + "sphinx.ext.napoleon", + "sphinx_design", + "sphinx_gallery.gen_gallery", + "matplotlib.sphinxext.mathmpl", + "matplotlib.sphinxext.plot_directive", +] + +if skip_api == "1": + autolog("Skipping the API docs generation (SKIP_API=1)") +else: + extensions.extend(["sphinxcontrib.apidoc"]) + extensions.extend(["api_rst_formatting"]) + +# -- Napoleon extension ------------------------------------------------------- +# See https://sphinxcontrib-napoleon.readthedocs.io/en/latest/sphinxcontrib.napoleon.html +napoleon_google_docstring = True +napoleon_numpy_docstring = True +napoleon_include_init_with_doc = False +napoleon_include_private_with_doc = False +napoleon_include_special_with_doc = True # includes dunders in api doc +napoleon_use_admonition_for_examples = False +napoleon_use_admonition_for_notes = False +napoleon_use_admonition_for_references = False +napoleon_use_ivar = False +napoleon_use_param = True +napoleon_use_rtype = True +napoleon_use_keyword = True +napoleon_custom_sections = None + +# -- copybutton extension ----------------------------------------------------- +# See https://sphinx-copybutton.readthedocs.io/en/latest/ +copybutton_prompt_text = r">>> |\.\.\. 
" +copybutton_prompt_is_regexp = True +copybutton_line_continuation_character = "\\" + +# sphinx.ext.todo configuration ----------------------------------------------- +# See https://www.sphinx-doc.org/en/master/usage/extensions/todo.html +todo_include_todos = False +todo_emit_warnings = False + +# sphinx.ext.autodoc configuration -------------------------------------------- +# https://www.sphinx-doc.org/en/master/usage/extensions/autodoc.html#confval-autodoc_default_options +autodoc_default_options = { + "members": True, + "member-order": "alphabetical", + "undoc-members": True, + "private-members": False, + "special-members": False, + "inherited-members": True, + "show-inheritance": True, +} + +# https://www.sphinx-doc.org/en/master/usage/extensions/autodoc.html#confval-autodoc_typehints +autodoc_typehints = "description" +autosummary_generate = True +autosummary_imported_members = True +autopackage_name = ["iris"] +autoclass_content = "both" +modindex_common_prefix = ["iris"] + +# -- apidoc extension --------------------------------------------------------- +# See https://github.com/sphinx-contrib/apidoc +source_code_root = (Path(__file__).parents[2]).absolute() +module_dir = source_code_root / "lib" +apidoc_module_dir = str(module_dir) +apidoc_output_dir = str(Path(__file__).parent / "generated/api") +apidoc_toc_file = False + +apidoc_excluded_paths = [ + str(module_dir / "iris/tests"), + str(module_dir / "iris/experimental/raster.*"), # gdal conflicts +] + +apidoc_module_first = True +apidoc_separate_modules = True +apidoc_extra_args = [] + +autolog(f"[sphinx-apidoc] source_code_root = {source_code_root}") +autolog(f"[sphinx-apidoc] apidoc_excluded_paths = {apidoc_excluded_paths}") +autolog(f"[sphinx-apidoc] apidoc_output_dir = {apidoc_output_dir}") + +# Add any paths that contain templates here, relative to this directory. +templates_path = ["_templates"] + +# -- intersphinx extension ---------------------------------------------------- +# See https://www.sphinx-doc.org/en/master/usage/extensions/intersphinx.html +intersphinx_mapping = { + "cartopy": ("https://scitools.org.uk/cartopy/docs/latest/", None), + "cf_units": ("https://cf-units.readthedocs.io/en/stable/", None), + "cftime": ("https://unidata.github.io/cftime/", None), + "dask": ("https://docs.dask.org/en/stable/", None), + "geovista": ("https://geovista.readthedocs.io/en/latest/", None), + "iris-esmf-regrid": ("https://iris-esmf-regrid.readthedocs.io/en/stable/", None), + "matplotlib": ("https://matplotlib.org/stable/", None), + "numpy": ("https://numpy.org/doc/stable/", None), + "pandas": ("https://pandas.pydata.org/docs/", None), + "python": ("https://docs.python.org/3/", None), + "pyvista": ("https://docs.pyvista.org/", None), + "scipy": ("https://docs.scipy.org/doc/scipy/", None), +} + +# The name of the Pygments (syntax highlighting) style to use. 
+pygments_style = "sphinx" + +# -- plot_directive extension ------------------------------------------------- +# See https://matplotlib.org/stable/api/sphinxext_plot_directive_api.html#options +plot_formats = [ + ("png", 100), +] + +# -- Extlinks extension ------------------------------------------------------- +# See https://www.sphinx-doc.org/en/master/usage/extensions/extlinks.html + +extlinks = { + "issue": ("https://github.com/SciTools/iris/issues/%s", "Issue #%s"), + "pull": ("https://github.com/SciTools/iris/pull/%s", "PR #%s"), + "discussion": ( + "https://github.com/SciTools/iris/discussions/%s", + "Discussion #%s", + ), +} + +# -- Doctest ("make doctest")-------------------------------------------------- + +doctest_global_setup = "import iris" + +# -- Options for HTML output -------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +# +html_favicon = "_static/iris-logo.svg" +html_theme = "pydata_sphinx_theme" + +# See https://pydata-sphinx-theme.readthedocs.io/en/latest/user_guide/configuring.html#configure-the-search-bar-position +html_sidebars = { + "**": [ + "custom_sidebar_logo_version", + "search-field", + "sidebar-nav-bs", + "sidebar-ethical-ads", + ] +} + +# See https://pydata-sphinx-theme.readthedocs.io/en/latest/user_guide/configuring.html +html_theme_options = { + "footer_start": ["copyright", "sphinx-version"], + "footer_end": ["custom_footer"], + "navigation_depth": 3, + "navigation_with_keys": False, + "show_toc_level": 2, + "show_prev_next": True, + "navbar_align": "content", + # removes the search box from the top bar + "navbar_persistent": [], + # TODO: review if 6 links is too crowded. + "header_links_before_dropdown": 6, + "github_url": "https://github.com/SciTools/iris", + "twitter_url": "https://twitter.com/scitools_iris", + # icons available: https://fontawesome.com/v5.15/icons?d=gallery&m=free + "icon_links": [ + { + "name": "GitHub Discussions", + "url": "https://github.com/SciTools/iris/discussions", + "icon": "far fa-comments", + }, + { + "name": "PyPI", + "url": "https://pypi.org/project/scitools-iris/", + "icon": "fas fa-box", + }, + { + "name": "Conda", + "url": "https://anaconda.org/conda-forge/iris", + "icon": "fas fa-boxes", + }, + ], + "use_edit_page_button": True, + # Omit `theme-switcher` from navbar_end below to disable it + # Info: https://pydata-sphinx-theme.readthedocs.io/en/stable/user_guide/light-dark.html#configure-default-theme-mode + # "navbar_end": ["navbar-icon-links"], + # https://pydata-sphinx-theme.readthedocs.io/en/v0.11.0/user_guide/branding.html#different-logos-for-light-and-dark-mode + "logo": { + "image_light": "_static/iris-logo-title.svg", + "image_dark": "_static/iris-logo-title-dark.svg", + }, +} + +# if we are building via Read The Docs and it is the latest (not stable) +if on_rtd and rtd_version == "latest": + html_theme_options["announcement"] = f""" + You are viewing the latest unreleased documentation + {version}. You can switch to a + stable + version.""" + +rev_parse = run(["git", "rev-parse", "--short", "HEAD"], capture_output=True) +commit_sha = rev_parse.stdout.decode().strip() + +html_context = { + # pydata_theme + "github_repo": "iris", + "github_user": "scitools", + "github_version": "main", + "doc_path": "docs/src", + # default theme. Also disabled the button in the html_theme_options. 
+ # Info: https://pydata-sphinx-theme.readthedocs.io/en/stable/user_guide/light-dark.html#configure-default-theme-mode + "default_mode": "auto", + # custom + "on_rtd": on_rtd, + "rtd_version": rtd_version, + "rtd_version_type": rtd_version_type, + "version": version, + "copyright_years": copyright_years, + "python_version": build_python_version, + "commit_sha": commit_sha, +} + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ["_static"] +html_style = "theme_override.css" + +# URL link checker. Some links work but report as broken, let's ignore them. +# See https://www.sphinx-doc.org/en/1.2/config.html#options-for-the-linkcheck-builder +linkcheck_ignore = [ + "https://catalogue.ceda.ac.uk/uuid/82adec1f896af6169112d09cc1174499", + "https://cfconventions.org", + "https://code.google.com/p/msysgit/downloads/list", + "https://effbot.org", + "https://help.github.com", + "https://docs.github.com", + "https://github.com", + "https://www.personal.psu.edu/cab38/ColorBrewer/ColorBrewer_updates.html", + "https://scitools.github.com/cartopy", + "https://www.wmo.int/pages/prog/www/DPFS/documents/485_Vol_I_en_colour.pdf", + "https://software.ac.uk/how-cite-software", + "https://www.esrl.noaa.gov/psd/data/gridded/conventions/cdc_netcdf_standard.shtml", + "https://www.nationalarchives.gov.uk/doc/open-government-licence", + "https://www.metoffice.gov.uk/", + "https://biggus.readthedocs.io/", + "https://stickler-ci.com/", + "https://twitter.com/scitools_iris", + "https://stackoverflow.com/questions/tagged/python-iris", + "https://www.flaticon.com/", +] + +# list of sources to exclude from the build. +exclude_patterns = [] + +# -- sphinx-gallery config ---------------------------------------------------- +# See https://sphinx-gallery.github.io/stable/configuration.html + + +def reset_modules(gallery_conf, fname): + """Force re-registering of nc-time-axis with matplotlib for each example. + + Required for sphinx-gallery>=0.11.0. + """ + from sys import modules + + _ = modules.pop("nc_time_axis", None) + + +# https://sphinx-gallery.github.io/dev/configuration.html#importing-callables +reset_modules_dir = Path(gettempdir()) / reset_modules.__name__ +reset_modules_dir.mkdir(exist_ok=True) +(reset_modules_dir / f"{reset_modules.__name__}.py").write_text( + getsource(reset_modules) +) +sys.path.insert(0, str(reset_modules_dir)) + + +sphinx_gallery_conf = { + # path to your example scripts + "examples_dirs": ["../gallery_code"], + # path to where to save gallery generated output + "gallery_dirs": ["generated/gallery"], + # filename pattern for the files in the gallery + "filename_pattern": "/plot_", + # filename pattern to ignore in the gallery + "ignore_pattern": r"__init__\.py", + # force gallery building, unless overridden (see src/Makefile) + "plot_gallery": "'True'", + "reset_modules": f"{reset_modules.__name__}.{reset_modules.__name__}", +} + +# ----------------------------------------------------------------------------- +# Remove warnings +warnings.filterwarnings("ignore") + +# -- numfig options (built-in) ------------------------------------------------ +# Enable numfig.
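+# With numfig enabled, Sphinx numbers figures, tables and code-blocks, and
+# they can be cross-referenced with the :numref: role; numfig_format (below)
+# customises the caption prefix used for each element type.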
+numfig = True + +numfig_format = { + "code-block": "Example %s", + "figure": "Figure %s", + "section": "Section %s", + "table": "Table %s", +} diff --git a/docs/src/copyright.rst b/docs/src/copyright.rst new file mode 100644 index 0000000000..b0d68cfe8c --- /dev/null +++ b/docs/src/copyright.rst @@ -0,0 +1,36 @@ + +Iris Copyright, Licensing and Contributors +========================================== + +Iris Code +--------- + +All Iris source code, unless explicitly stated, is ``Copyright Iris +contributors`` and is licensed under the **BSD-3 License**. +You should find all source files with the following header: + +.. admonition:: Code License + + Copyright Iris contributors + + This file is part of Iris and is released under the BSD license. + See LICENSE in the root of the repository for full licensing details. + + +Iris Documentation and Examples +------------------------------- + +All documentation, examples and sample data found on this website and in the source repository +are licensed under the UK's Open Government Licence: + +.. admonition:: Documentation, example and data license + + (C) British Crown Copyright |copyright_years| + + You may use and reuse the information featured on this website (not including logos) free of + charge in any format or medium, under the terms of the + `Open Government Licence `_. + We encourage users to establish hypertext links to this website. + + Any email enquiries regarding the use and reuse of this information resource should be + sent to: psi@nationalarchives.gsi.gov.uk. diff --git a/docs/src/developers_guide/assets/developer-settings-github-apps.png b/docs/src/developers_guide/assets/developer-settings-github-apps.png new file mode 100644 index 0000000000..a63994d087 Binary files /dev/null and b/docs/src/developers_guide/assets/developer-settings-github-apps.png differ diff --git a/docs/src/developers_guide/assets/download-pem.png b/docs/src/developers_guide/assets/download-pem.png new file mode 100644 index 0000000000..cbceb1304d Binary files /dev/null and b/docs/src/developers_guide/assets/download-pem.png differ diff --git a/docs/src/developers_guide/assets/generate-key.png b/docs/src/developers_guide/assets/generate-key.png new file mode 100644 index 0000000000..ac894dc71b Binary files /dev/null and b/docs/src/developers_guide/assets/generate-key.png differ diff --git a/docs/src/developers_guide/assets/gha-token-example.png b/docs/src/developers_guide/assets/gha-token-example.png new file mode 100644 index 0000000000..cba1cf6935 Binary files /dev/null and b/docs/src/developers_guide/assets/gha-token-example.png differ diff --git a/docs/src/developers_guide/assets/install-app.png b/docs/src/developers_guide/assets/install-app.png new file mode 100644 index 0000000000..31259de588 Binary files /dev/null and b/docs/src/developers_guide/assets/install-app.png differ diff --git a/docs/src/developers_guide/assets/install-iris-actions.png b/docs/src/developers_guide/assets/install-iris-actions.png new file mode 100644 index 0000000000..db16dee55b Binary files /dev/null and b/docs/src/developers_guide/assets/install-iris-actions.png differ diff --git a/docs/src/developers_guide/assets/installed-app.png b/docs/src/developers_guide/assets/installed-app.png new file mode 100644 index 0000000000..ab87032393 Binary files /dev/null and b/docs/src/developers_guide/assets/installed-app.png differ diff --git a/docs/src/developers_guide/assets/iris-actions-secret.png b/docs/src/developers_guide/assets/iris-actions-secret.png new file mode 100644 index
0000000000..f32456d0f2 Binary files /dev/null and b/docs/src/developers_guide/assets/iris-actions-secret.png differ diff --git a/docs/src/developers_guide/assets/iris-github-apps.png b/docs/src/developers_guide/assets/iris-github-apps.png new file mode 100644 index 0000000000..50753532b7 Binary files /dev/null and b/docs/src/developers_guide/assets/iris-github-apps.png differ diff --git a/docs/src/developers_guide/assets/iris-secrets-created.png b/docs/src/developers_guide/assets/iris-secrets-created.png new file mode 100644 index 0000000000..19b0ba11dc Binary files /dev/null and b/docs/src/developers_guide/assets/iris-secrets-created.png differ diff --git a/docs/src/developers_guide/assets/iris-security-actions.png b/docs/src/developers_guide/assets/iris-security-actions.png new file mode 100644 index 0000000000..7cbe3a7dc2 Binary files /dev/null and b/docs/src/developers_guide/assets/iris-security-actions.png differ diff --git a/docs/src/developers_guide/assets/iris-settings.png b/docs/src/developers_guide/assets/iris-settings.png new file mode 100644 index 0000000000..70714235c2 Binary files /dev/null and b/docs/src/developers_guide/assets/iris-settings.png differ diff --git a/docs/src/developers_guide/assets/org-perms-members.png b/docs/src/developers_guide/assets/org-perms-members.png new file mode 100644 index 0000000000..99fd8985e2 Binary files /dev/null and b/docs/src/developers_guide/assets/org-perms-members.png differ diff --git a/docs/src/developers_guide/assets/repo-perms-contents.png b/docs/src/developers_guide/assets/repo-perms-contents.png new file mode 100644 index 0000000000..4c325c334d Binary files /dev/null and b/docs/src/developers_guide/assets/repo-perms-contents.png differ diff --git a/docs/src/developers_guide/assets/repo-perms-pull-requests.png b/docs/src/developers_guide/assets/repo-perms-pull-requests.png new file mode 100644 index 0000000000..812f5ef951 Binary files /dev/null and b/docs/src/developers_guide/assets/repo-perms-pull-requests.png differ diff --git a/docs/src/developers_guide/assets/scitools-settings.png b/docs/src/developers_guide/assets/scitools-settings.png new file mode 100644 index 0000000000..8d7e728ab5 Binary files /dev/null and b/docs/src/developers_guide/assets/scitools-settings.png differ diff --git a/docs/src/developers_guide/assets/user-perms.png b/docs/src/developers_guide/assets/user-perms.png new file mode 100644 index 0000000000..607c7dcdb6 Binary files /dev/null and b/docs/src/developers_guide/assets/user-perms.png differ diff --git a/docs/src/developers_guide/assets/webhook-active.png b/docs/src/developers_guide/assets/webhook-active.png new file mode 100644 index 0000000000..538362f335 Binary files /dev/null and b/docs/src/developers_guide/assets/webhook-active.png differ diff --git a/docs/src/developers_guide/asv_example_images/commits.png b/docs/src/developers_guide/asv_example_images/commits.png new file mode 100644 index 0000000000..4e0d695322 Binary files /dev/null and b/docs/src/developers_guide/asv_example_images/commits.png differ diff --git a/docs/src/developers_guide/asv_example_images/comparison.png b/docs/src/developers_guide/asv_example_images/comparison.png new file mode 100644 index 0000000000..e146d30696 Binary files /dev/null and b/docs/src/developers_guide/asv_example_images/comparison.png differ diff --git a/docs/src/developers_guide/asv_example_images/scalability.png b/docs/src/developers_guide/asv_example_images/scalability.png new file mode 100644 index 0000000000..260c3ef536 Binary files /dev/null and 
b/docs/src/developers_guide/asv_example_images/scalability.png differ diff --git a/docs/src/developers_guide/ci_checks.png b/docs/src/developers_guide/ci_checks.png new file mode 100644 index 0000000000..54ab672b3c Binary files /dev/null and b/docs/src/developers_guide/ci_checks.png differ diff --git a/docs/src/developers_guide/contributing_benchmarks.rst b/docs/src/developers_guide/contributing_benchmarks.rst new file mode 100644 index 0000000000..ccb9a50e39 --- /dev/null +++ b/docs/src/developers_guide/contributing_benchmarks.rst @@ -0,0 +1,64 @@ +.. include:: ../common_links.inc + +.. _contributing.benchmarks: + +Benchmarking +============ +Iris includes architecture for benchmarking performance and other metrics of +interest. This is done using the `Airspeed Velocity`_ (ASV) package. + + +.. note:: Full detail on the setup and how to run or write benchmarks is in + `benchmarks/README.md`_ in the Iris repository. + +Continuous Integration +---------------------- +The primary purpose of `Airspeed Velocity`_, and Iris' specific benchmarking +setup, is to monitor for performance changes using statistical comparison +between commits, and this forms part of Iris' continuous integration. + +Accurately assessing performance takes longer than functionality pass/fail +tests, so the benchmark suite is not automatically run against open pull +requests; instead, it is **run overnight against each of the commits of the +previous day** to check if any commit has introduced performance shifts. +Detected shifts are reported in a new Iris GitHub issue. + +.. _on_demand_pr_benchmark: + +If a pull request author/reviewer suspects their changes may cause performance +shifts, they can manually order their pull request to be benchmarked by adding +the ``benchmark_this`` label to the PR. Read more in `benchmarks/README.md`_. + +Other Uses +---------- +Even when not statistically comparing commits, ASV's accurate execution time +results - recorded using a sophisticated system of repeats - have other +applications. + +* Absolute numbers can be interpreted, provided they are recorded on a + dedicated resource. +* Results for a series of commits can be visualised for an intuitive + understanding of when and why changes occurred. + + .. image:: asv_example_images/commits.png + :width: 300 + +* Parameterised benchmarks make it easy to visualise: + + * Comparisons + + .. image:: asv_example_images/comparison.png + :width: 300 + + * Scalability + + .. image:: asv_example_images/scalability.png + :width: 300 + +This isn't limited to execution times, either: ASV can also measure memory demand, +and even arbitrary numbers (e.g. file size, regridding accuracy), although +without the repetition logic that execution timing has. + + +.. _Airspeed Velocity: https://github.com/airspeed-velocity/asv +.. _benchmarks/README.md: https://github.com/SciTools/iris/blob/main/benchmarks/README.md diff --git a/docs/src/developers_guide/contributing_changes.rst b/docs/src/developers_guide/contributing_changes.rst new file mode 100644 index 0000000000..48357874a7 --- /dev/null +++ b/docs/src/developers_guide/contributing_changes.rst @@ -0,0 +1,11 @@ + +.. _contributing.changes: + +Contributing Your Changes +========================= + +..
+
+
+.. _Airspeed Velocity: https://github.com/airspeed-velocity/asv
+.. _benchmarks/README.md: https://github.com/SciTools/iris/blob/main/benchmarks/README.md diff --git a/docs/src/developers_guide/contributing_changes.rst b/docs/src/developers_guide/contributing_changes.rst new file mode 100644 index 0000000000..48357874a7 --- /dev/null +++ b/docs/src/developers_guide/contributing_changes.rst @@ -0,0 +1,11 @@
+
+.. _contributing.changes:
+
+Contributing Your Changes
+=========================
+
+.. toctree::
+   :maxdepth: 3
+
+   documenting/whats_new_contributions
+   contributing_pull_request_checklist diff --git a/docs/src/developers_guide/contributing_ci_tests.rst b/docs/src/developers_guide/contributing_ci_tests.rst new file mode 100644 index 0000000000..542178c2ff --- /dev/null +++ b/docs/src/developers_guide/contributing_ci_tests.rst @@ -0,0 +1,151 @@
+.. include:: ../common_links.inc
+
+.. _developer_testing_ci:
+
+Continuous Integration (CI) Testing
+===================================
+
+.. note:: Iris is currently supported and tested against |python_support|
+   running on Linux. We do not currently actively test on other
+   platforms such as Windows or macOS.
+
+The `Iris`_ GitHub repository is configured to run checks against all its
+branches automatically whenever a pull-request is created, updated or merged.
+The checks performed are:
+
+* :ref:`testing_gha`
+* :ref:`testing_cla`
+* :ref:`pre_commit_ci`
+
+
+.. _testing_gha:
+
+GitHub Actions
+**************
+
+Iris unit and integration tests are an essential mechanism to ensure
+that the Iris code base is working as expected. :ref:`developer_running_tests`
+may be performed manually by a developer locally. However, Iris is configured to
+use `GitHub Actions`_ (GHA) for automated Continuous Integration (CI) testing.
+
+The Iris GHA YAML configuration files in the ``.github/workflows`` directory
+define the CI tasks to be performed. For further details
+refer to the `GitHub Actions`_ documentation. The tasks performed during CI include:
+
+* running the system, integration and unit tests for Iris
+* ensuring the documentation gallery builds successfully
+* performing all doc-tests within the code base
+* checking all URL references within the code base and documentation are valid
+
+The above GHA tasks are run automatically against all `Iris`_ branches
+on GitHub whenever a pull-request is submitted, updated or merged. See the
+`Iris GitHub Actions`_ dashboard for details of recent past and active CI jobs.
+
+
+.. _gha_test_env:
+
+GitHub Actions Test Environment
+-------------------------------
+
+The CI test environments for our GHA are determined from the requirement files
+in ``requirements/pyXX.yml``. These conda environment files list the top-level
+package dependencies for running and testing Iris.
+
+For reproducible test results, these environments are resolved for all their dependencies
+and stored as conda lock files in the ``requirements/locks`` directory. The test environments
+will not resolve the dependencies each time; instead they will use the lock files to reproduce the
+exact same environment each time.
+
+**If you have updated the requirement YAML files with new dependencies, you will need to
+generate new lock files.** To do this, run the command::
+
+    python tools/update_lockfiles.py -o requirements/locks requirements/py*.yml
+
+or simply::
+
+    make lockfiles
+
+and add the changed lockfiles to your pull request.
+
+New lockfiles are generated automatically each week to ensure that Iris continues to be
+tested against the latest available version of its dependencies.
+Each week the yaml files in ``requirements`` are resolved by a GitHub Action.
+If the resolved environment has changed, a pull request is created with the new lock files.
+The CI test suite will run on this pull request. If the tests fail, a developer
+will need to create a new branch based off the ``auto-update-lockfiles`` branch
+and add the required fixes to this new branch.
+If the fixes are made to the ``auto-update-lockfiles`` branch, these will be
+overwritten the next time the GitHub Action is run.
+
+
+GitHub Checklist
+----------------
+
+An example snapshot from a successful GitHub pull-request shows all tests
+passing:
+
+.. image:: ci_checks.png
+
+If any CI tasks fail, then the pull-request is unlikely to be merged to the
+Iris target branch by a core developer.
+
+
+.. _testing_cla:
+
+`CLA Assistant`_
+****************
+
+A bot which checks that the GitHub authors of the pull-request have signed the
+|SciTools Contributor's License Agreement (CLA)|_.
+
+
+.. _pre_commit_ci:
+
+pre-commit CI
+*************
+
+A CI service for the `pre-commit`_ framework that checks and auto-fixes all
+pull-requests given the `Iris`_ GitHub repository `.pre-commit-config.yaml`_.
+
+See the `pre-commit.ci dashboard`_ for details of recent past and active Iris jobs.
+
+.. note::
+
+   The `codespell`_ ``pre-commit`` hook checks the spelling of the whole codebase
+   and documentation. This hook is configured in the ``[tool.codespell]`` section
+   of the ``pyproject.toml`` file.
+
+   Append to the ``ignore-words-list`` option any **valid words** that are
+   considered **not** a typo and should **not** be corrected by `codespell`_.
+
+ruff
+----
+As of **Iris 3.8**, `ruff`_ has been adopted to ensure our codebase follows best
+practice. `ruff`_ is configured in the `Iris`_ GitHub repository using
+`.pre-commit-config.yaml`_.
+
+You can install and run `ruff`_ in your development **iris-dev** conda environment
+via::
+
+    conda activate iris-dev
+    pip install ruff
+    cd iris
+    ruff .
+
+.. note::
+
+   The `ruff`_ ``pre-commit`` hook checks for compliance of the whole codebase.
+   This hook is configured in the ``[tool.ruff]`` section
+   of the ``pyproject.toml`` file.
+
+   Edit the ``.ruff.toml`` file to include any *temporary* rules to be ignored.
+   Edit the ``pyproject.toml`` to include any *permanent* rules to be ignored.
+   We aim to be as `ruff`_ compliant as possible.
+
+For more information on how to use `ruff`_ please see the `ruff documentation`_.
+
+
+.. _.pre-commit-config.yaml: https://github.com/SciTools/iris/blob/main/.pre-commit-config.yaml
+.. _pre-commit.ci dashboard: https://results.pre-commit.ci/repo/github/5312648
+.. _CLA Assistant: https://github.com/cla-assistant/cla-assistant
+.. |SciTools Contributor's License Agreement (CLA)| replace:: **SciTools Contributor's License Agreement (CLA)**
+.. _ruff documentation: https://docs.astral.sh/ruff/tutorial/ diff --git a/docs/src/developers_guide/contributing_code_formatting.rst b/docs/src/developers_guide/contributing_code_formatting.rst new file mode 100644 index 0000000000..bb3140e4f9 --- /dev/null +++ b/docs/src/developers_guide/contributing_code_formatting.rst @@ -0,0 +1,69 @@
+.. include:: ../common_links.inc
+
+.. _code_formatting:
+
+Code Formatting
+===============
+
+To ensure a consistent code format throughout Iris, we recommend using
+tools to check the source directly.
+
+* `black`_ for an opinionated coding auto-formatter
+* `flake8`_ linting checks
+
+The preferred way to run these tools automatically is to set up and configure
+`pre-commit`_.
+
+You can install ``pre-commit`` in your development environment using ``pip``::
+
+    $ pip install pre-commit
+
+or alternatively using ``conda``::
+
+    $ conda install -c conda-forge pre-commit
+
+.. note:: If you have set up your Python environment using the guide
+   :ref:`installing_from_source` then ``pre-commit`` should already
+   be present.
+
+In order to install the ``pre-commit`` git hooks defined in our
+``.pre-commit-config.yaml`` file, you must now run the following command from
+the root directory of Iris::
+
+    $ pre-commit install
+
+Upon performing a ``git commit``, your code will now be automatically formatted
+to the ``black`` configuration defined in our ``pyproject.toml`` file, and
+linted according to our ``.flake8`` configuration file. Note that
+``pre-commit`` will automatically download and install the necessary packages
+for each ``.pre-commit-config.yaml`` git hook.
+
+Additionally, you may wish to enable ``black`` for your preferred editor/IDE.
+
+With ``pre-commit`` configured, the output of performing a ``git commit``
+will look similar to::
+
+    Check for added large files..............................................Passed
+    Check for merge conflicts................................................Passed
+    Debug Statements (Python)............................(no files to check)Skipped
+    Don't commit to branch...................................................Passed
+    black................................................(no files to check)Skipped
+    flake8...............................................(no files to check)Skipped
+    [contribution_overhaul c8513187] this is my commit message
+    2 files changed, 10 insertions(+), 9 deletions(-)
+
+
+.. note:: You can also run `black`_ and `flake8`_ manually. Please see
+   their official documentation for more information.
+
+Type Hinting
+------------
+Iris is gradually adding type hints into the
+codebase. The reviewer will look for type hints in a pull request; if you're
+not confident with these, feel free to work together with the reviewer to
+add/improve them.
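+
+As a purely illustrative sketch (this function is hypothetical, not part of
+the Iris API), a type-hinted signature looks like:
+
+.. code-block:: python
+
+    import numpy as np
+
+
+    def normalise(data: np.ndarray, factor: float = 1.0) -> np.ndarray:
+        """Return ``data`` scaled by ``factor``."""
+        return data * factor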
+
+
+.. _pre-commit: https://pre-commit.com/ diff --git a/docs/src/developers_guide/contributing_codebase_index.rst b/docs/src/developers_guide/contributing_codebase_index.rst new file mode 100644 index 0000000000..b59a196ff0 --- /dev/null +++ b/docs/src/developers_guide/contributing_codebase_index.rst @@ -0,0 +1,13 @@
+.. _contributing.documentation.codebase:
+
+Working with the Code Base
+==========================
+
+.. toctree::
+   :maxdepth: 3
+
+   contributing_code_formatting
+   documenting/docstrings
+   documenting/rest_guide
+   contributing_deprecations
+   contributing_testing_index diff --git a/docs/src/developers_guide/contributing_deprecations.rst b/docs/src/developers_guide/contributing_deprecations.rst new file mode 100644 index 0000000000..8c5cb21feb --- /dev/null +++ b/docs/src/developers_guide/contributing_deprecations.rst @@ -0,0 +1,120 @@
+.. _iris_development_deprecations:
+
+Deprecations
+************
+
+If you need to make a backwards-incompatible change to a public API
+[#public-api]_ that has been included in a release (e.g. deleting a
+method), then you must first deprecate the old behaviour in at least
+one release, before removing/updating it in the next major release.
+
+
+Adding a Deprecation
+====================
+
+.. _removing-a-public-api:
+
+Removing a Public API
+---------------------
+
+The simplest form of deprecation occurs when you need to remove a public
+API. The public API in question is deprecated for a period before it is
+removed to allow time for user code to be updated. Sometimes the
+deprecation is accompanied by the introduction of a new public API.
+
+Under these circumstances the following points apply (the sketch after this
+list gives an illustration):
+
+- Using the deprecated API must result in a concise deprecation warning which
+  is an instance of :class:`iris.IrisDeprecation`. It is easiest to call
+  :func:`iris._deprecation.warn_deprecated`, which is a
+  simple wrapper around :func:`warnings.warn` with the signature
+  `warn_deprecated(message, **kwargs)`.
+- Where possible, your deprecation warning should include advice on
+  how to avoid using the deprecated API. For example, you might
+  reference a preferred API, or more detailed documentation elsewhere.
+- You must update the docstring for the deprecated API to include a
+  Sphinx deprecation directive:
+
+  :literal:`.. deprecated:: <VERSION>`
+
+  where you should replace ``<VERSION>`` with the major and minor version
+  of Iris in which this API is first deprecated. For example: `1.8`.
+
+  As with the deprecation warning, you should include advice on how to
+  avoid using the deprecated API within the content of this directive.
+  Feel free to include more detail in the updated docstring than in the
+  deprecation warning.
+- You should check the documentation for references to the deprecated
+  API and update them as appropriate.
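+
+For illustration, a deprecated method might look something like the sketch
+below (the method names and version number are hypothetical):
+
+.. code-block:: python
+
+    from iris._deprecation import warn_deprecated
+
+
+    def old_method(self):
+        """Do something.
+
+        .. deprecated:: 1.8
+            Use :meth:`new_method` instead.
+
+        """
+        warn_deprecated("old_method() is deprecated, use new_method() instead.")
+        return self.new_method()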
+
+Changing a Default
+------------------
+
+When you need to change the default behaviour of a public API, the
+situation is slightly more complex. The recommended solution is to use
+the :data:`iris.FUTURE` object. The :data:`iris.FUTURE` object provides
+boolean attributes that allow user code to control at run-time the
+default behaviour of corresponding public APIs. When a boolean attribute
+is set to `False` it causes the corresponding public API to use its
+deprecated default behaviour. When a boolean attribute is set to `True`
+it causes the corresponding public API to use its new default behaviour.
+
+The following points apply in addition to those for removing a public
+API:
+
+- You should add a new boolean attribute to :data:`iris.FUTURE` (by
+  modifying :class:`iris.Future`) that controls the default behaviour
+  of the public API that needs updating. The initial state of the new
+  boolean attribute should be `False`. You should name the new boolean
+  attribute to indicate that setting it to `True` will select the new
+  default behaviour.
+- You should include a reference to this :data:`iris.FUTURE` flag in your
+  deprecation warning and corresponding Sphinx deprecation directive.
+
+
+Removing a Deprecation
+======================
+
+When the time comes to make a new major release, you should locate any
+deprecated APIs within the code that satisfy the one release
+minimum period described previously. Locating deprecated APIs can easily
+be done by searching for the Sphinx deprecation directives and/or
+deprecation warnings.
+
+Removing a Public API
+---------------------
+
+The deprecated API should be removed and any corresponding documentation
+and/or example code should be removed/updated as appropriate.
+
+.. _iris_developer_future:
+
+Changing a Default
+------------------
+
+- You should update the initial state of the relevant boolean attribute
+  of :data:`iris.FUTURE` to `True`.
+- You should deprecate setting the relevant boolean attribute of
+  :class:`iris.Future` in the same way as described in
+  :ref:`removing-a-public-api`.
+
+
+.. rubric:: Footnotes
+
+.. [#public-api] A name without a leading underscore in any of its
+   components, with the exception of the :mod:`iris.experimental` and
+   :mod:`iris.tests` packages.
+
+   Example public names are:
+    - `iris.this`
+    - `iris.this.that`
+
+   Example private names are:
+    - `iris._this`
+    - `iris.this._that`
+    - `iris._this.that`
+    - `iris._this._that`
+    - `iris.experimental.something`
+    - `iris.tests.get_data_path` diff --git a/docs/src/developers_guide/contributing_documentation.rst b/docs/src/developers_guide/contributing_documentation.rst new file mode 100644 index 0000000000..e289b1548d --- /dev/null +++ b/docs/src/developers_guide/contributing_documentation.rst @@ -0,0 +1,21 @@
+
+How to Contribute to the Documentation
+--------------------------------------
+
+Documentation is important and we encourage any improvements that can be made.
+If you believe the documentation is not clear, please contribute a change to
+improve the documentation for all users.
+
+If you're confident diving right in, please head for
+:ref:`contributing.documentation_full`.
+
+If you're not, then we've got a step-by-step guide to walk you through it:
+:ref:`contributing.documentation_easy`
+
+.. toctree::
+   :maxdepth: 1
+   :hidden:
+
+   contributing_documentation_easy
+   contributing_documentation_full
+
\ No newline at end of file diff --git a/docs/src/developers_guide/contributing_documentation_easy.rst b/docs/src/developers_guide/contributing_documentation_easy.rst new file mode 100755 index 0000000000..a0513fe560 --- /dev/null +++ b/docs/src/developers_guide/contributing_documentation_easy.rst @@ -0,0 +1,103 @@
+
+.. include:: ../common_links.inc
+
+.. _contributing.documentation_easy:
+
+Contributing to the Documentation (the easy way)
+------------------------------------------------
+
+Documentation is important and we encourage any improvements that can be made.
+If you believe the documentation is not clear, please contribute a change to
+improve the documentation for all users.
+
+The guide below is designed to be accessible to those with little-to-no
+knowledge of programming and GitHub. If you find that something doesn't work as
+described or could use more explanation, then please let us know (or contribute
+the improvement yourself)!
+
+First Time Only Steps
+^^^^^^^^^^^^^^^^^^^^^
+
+1. Create a `GitHub <https://github.com>`_ account.
+
+2. Complete the `SciTools Contributor's License Agreement (CLA)`_.
+   This is a one-off requirement for anyone who wishes to contribute to a
+   SciTools repository - including the documentation.
+
+Steps to Complete Each Time You Propose Changes
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+1. Navigate to the documentation page that you want to edit (on this site).
+
+2. Click the ``Edit on GitHub`` button at the **top right** of the page.
+
+.. image:: edit_on_github.png
+
+3. In the resulting GitHub page, select **main** from the ``Switch
+   branches/tags`` drop-down menu near the **top left** of the page (to the left
+   of the ``iris / docs / src / ...`` links) if it isn't already. This changes
+   the branch to **main**.
+
+.. image:: find_main.png
+
+4. Click the pencil symbol near the **top right** (to the right of the ``Raw``
+   and ``Blame`` buttons).
+
+.. image:: edit_button.png
+
+5. Make your edits! Try to strike a balance between informing the audience
+   enough that they understand and overwhelming them with information.
+
+.. note::
+
+   You may see the following message at the top of the edit page, informing you
+   that GitHub has created your own ``fork`` (or copy) of the project as a
+   precursor to allowing you to edit the page. Your changes will be merged into
+   the main version of the documentation later.
+
+   ..
image:: fork_banner.png
+
+6. Scroll to the bottom of the edit page and enter some appropriate information
+   in the two boxes under ``Propose changes``. You can just keep the default text
+   if you like or enter something more specific - a short sentence explaining
+   what's changed is fine. Then click the ``Propose changes`` button.
+
+.. image:: propose_changes.png
+
+7. In the resulting page titled ``Pull Request``, write a brief description of
+   what you've changed underneath the ``### Description`` heading:
+
+.. code::
+
+    ### Description
+
+
+
+Describing what you've changed and why will help the person who reviews your changes.
+
+.. image:: pull_request.png
+
+8. Click the ``Create pull request`` button.
+
+.. tip::
+
+   If you're not sure that you're making your pull request right, or have a
+   question, then make it anyway! You can then comment on it to ask your
+   question, and someone from the dev team will be happy to help you out (then
+   edit your pull request if you need to).
+
+What Happens Next?
+^^^^^^^^^^^^^^^^^^
+
+Another Iris contributor will review your changes (this happens for everyone who
+makes changes to Iris or its documentation). The reviewer might make comments or
+ask questions (don't worry about missing these, GitHub will email you to let you
+know). You can respond to these comments underneath where they appear in GitHub.
+
+Once you've worked everything out together, the reviewer will merge your changes
+into the main version of the documentation so that they're accessible for
+everyone to benefit from.
+
+**You've now contributed to the Iris documentation!** If you've caught the bug
+and want to get more involved (or you're just interested in what that would mean)
+then chat to the person reviewing your code or another Iris contributor.
\ No newline at end of file diff --git a/docs/src/developers_guide/contributing_documentation_full.rst b/docs/src/developers_guide/contributing_documentation_full.rst new file mode 100755 index 0000000000..5cb5269fa1 --- /dev/null +++ b/docs/src/developers_guide/contributing_documentation_full.rst @@ -0,0 +1,171 @@
+.. include:: ../common_links.inc
+
+.. _contributing.documentation_full:
+
+Contributing to the Documentation
+---------------------------------
+
+This guide is for those comfortable with the development process, looking for
+the specifics of how to apply that knowledge to Iris. You may instead find it
+easier to use the :ref:`contributing.documentation_easy`.
+
+Any change to the Iris project, whether it is a bugfix, new feature or
+documentation update, must use the :ref:`development-workflow`.
+
+
+Requirements
+~~~~~~~~~~~~
+
+The documentation uses specific packages that need to be present. Please see
+:ref:`installing_iris` for instructions.
+
+
+.. _contributing.documentation.building:
+
+Building
+~~~~~~~~
+
+This documentation was built using the latest Python version that Iris
+supports. For more information see :ref:`installing_iris`.
+
+The build can be run from the documentation directory ``docs/src``.
+
+The build output for the html is found in the ``_build/html`` sub directory.
+When updating the documentation, ensure the html build has *no errors* or
+*warnings*, otherwise it may fail the automated `Iris GitHub Actions`_ build.
+
+Once the build is complete, if it is rerun it will only rebuild the impacted
+build artefacts, so it should take less time.
+
+There is an option to perform a build but skip the
+:ref:`contributing.documentation.gallery` creation completely.
+This can be achieved via::
+
+    make html-noplot
+
+Another option is to skip the :doc:`../generated/api/iris` documentation creation. This can be
+useful as it reduces the time to build the documentation, however you may get
+some build warnings as there may be references to the API documentation.
+This can be achieved via::
+
+    make html-noapi
+
+You can combine both the above and skip the
+:ref:`contributing.documentation.gallery` and :doc:`../generated/api/iris`
+documentation completely. This can be achieved via::
+
+    make html-quick
+
+If you wish to run a full clean build you can run::
+
+    make clean
+    make html
+
+This is useful for a final test before committing your changes. Having built
+the documentation, you can view it in your default browser via::
+
+    make show
+
+.. note:: In order to preserve a clean build for the html, all **warnings**
+   have been promoted to be **errors** to ensure they are addressed.
+   This **only** applies when ``make html`` is run.
+
+.. _contributing.documentation.testing:
+
+Testing
+~~~~~~~
+
+There are various ways to test aspects of the documentation.
+
+Each :ref:`contributing.documentation.gallery` entry has a corresponding test.
+To run all the gallery tests::
+
+    pytest -v docs/gallery_tests/test_gallery_examples.py
+
+To run a test for a single gallery example, use the ``pytest -k`` option for
+pattern matching, e.g.::
+
+    pytest -v -k plot_coriolis docs/gallery_tests/test_gallery_examples.py
+
+If a gallery test fails, follow the instructions in :ref:`testing.graphics`.
+
+The ``make`` commands shown below can be run in the ``docs`` or ``docs/src``
+directory.
+
+Many documentation pages include Python code that can be run to ensure it
+is still valid, or to demonstrate examples. To ensure these tests pass
+run::
+
+    make doctest
+
+See :data:`iris.cube.Cube.data` for an example of using the `doctest`_
+approach.
+
+.. _doctest: https://www.sphinx-doc.org/en/stable/ext/doctest.html
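+
+For illustration, a doctest block within a documentation page looks like the
+following (a minimal sketch, assuming the Iris sample data is installed):
+
+.. code-block:: python
+
+    >>> import iris
+    >>> cube = iris.load_cube(iris.sample_data_path("air_temp.pp"))
+    >>> cube.name()
+    'air_temperature'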
+
+The hyperlinks in the documentation can be checked automatically.
+If there is a link that is known to work it can be excluded from the checks by
+adding it to the ``linkcheck_ignore`` array that is defined in the
+`conf.py`_. The hyperlink check can be run via::
+
+    make linkcheck
+
+If this fails, check the output for the text **broken** and then correct
+or ignore the URL.
+
+.. note:: In addition to the automated `Iris GitHub Actions`_ build of all the
+   documentation build options above, the
+   https://readthedocs.org/ service is also used. The configuration
+   for this is held in a file in the root of the
+   `github Iris project <https://github.com/SciTools/iris>`_ named
+   ``.readthedocs.yml``.
+
+
+.. _conf.py: https://github.com/SciTools/iris/blob/main/docs/src/conf.py
+
+
+.. _contributing.documentation.api:
+
+Generating API Documentation
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+In order to auto generate the API documentation based upon the docstrings, a
+custom set of Python scripts is used; these are located in the directory
+``docs/src/sphinxext``. Once the ``make html`` command has been run,
+the output of these scripts can be found in
+``docs/src/generated/api``.
+
+If there is a particularly troublesome module that breaks ``make html``, you
+can exclude the module from the API documentation. Add the entry to the
+``exclude_modules`` tuple list in the
+``docs/src/sphinxext/generate_package_rst.py`` file.
+
+
+.. _contributing.documentation.gallery:
+
+Gallery
+~~~~~~~
+
+The Iris :ref:`gallery_index` uses a sphinx extension named
+`sphinx-gallery <https://sphinx-gallery.github.io/>`_
+that auto generates reStructuredText (rst) files based upon a gallery source
+directory that abides by a directory and filename convention.
+
+The code for the gallery entries is in ``docs/gallery_code``.
+Each sub directory in this directory is a sub section of the gallery. The
+respective ``README.rst`` in each folder is included in the gallery output.
+
+To add an entry to the gallery, simply place your Python code into the
+appropriate sub directory and name it with a prefix of ``plot_``. If your
+gallery entry does not fit into any existing sub directories, then create a
+new directory and place it there. A test for the gallery entry will be
+automatically generated (see Testing_ for how to run it). To add a new
+reference image for this test, follow the instructions in
+:ref:`testing.graphics`.
+
+The reStructuredText (rst) output of the gallery is located in
+``docs/src/generated/gallery``.
+
+For more information on the directory structure and options please see the
+`sphinx-gallery getting started
+<https://sphinx-gallery.github.io/stable/getting_started.html>`_ documentation. diff --git a/docs/src/developers_guide/contributing_getting_involved.rst b/docs/src/developers_guide/contributing_getting_involved.rst new file mode 100644 index 0000000000..9da6cd13eb --- /dev/null +++ b/docs/src/developers_guide/contributing_getting_involved.rst @@ -0,0 +1,65 @@
+.. include:: ../common_links.inc
+
+.. _development_where_to_start:
+.. _developers_guide:
+
+Developers Guide
+----------------
+
+Iris_ is an Open Source project hosted on GitHub, and as such anyone with a
+GitHub account may create an `issue`_ on our `Iris GitHub`_ project page for
+raising:
+
+* bug reports
+* feature requests
+* documentation improvements
+
+The `Iris GitHub`_ project has been configured to use templates for each of
+the above issue types when creating a `new issue`_ to ensure the appropriate
+information is provided.
+
+Alternatively, **join the conversation** in Iris `GitHub Discussions`_ when
+you would like the opinions of the Iris community.
+
+A `pull request`_ may also be created by anyone who has become a
+|contributor|_ to Iris_. Permissions to merge pull requests to the
+``main`` branch are only given to |core developers|_ of Iris_; this is
+to ensure a measure of control. All authors on a pull request will
+automatically be asked to sign the
+`SciTools Contributor's License Agreement (CLA)`_, if they have not already
+done so.
+
+To get started, we suggest reading recent `issues`_, `GitHub Discussions`_ and
+`pull requests`_ for Iris.
+
+If you are new to using GitHub, we recommend reading the
+`GitHub getting started`_ documentation.
+
+.. _GitHub getting started: https://docs.github.com/en/github/getting-started-with-github
+.. |contributor| replace:: **contributor**
+.. |core developers| replace:: **core developers**
+
+
+.. toctree::
+   :maxdepth: 1
+   :caption: Developers Guide
+   :name: development_index
+   :hidden:
+
+   gitwash/index
+   contributing_documentation
+   contributing_codebase_index
+   contributing_changes
+   github_app
+   release
+
+
+.. toctree::
+   :maxdepth: 1
+   :caption: Reference
+   :hidden:
+
+   ../generated/api/iris
+   ../whatsnew/index
+   ../copyright
+   ../voted_issues diff --git a/docs/src/developers_guide/contributing_graphics_tests.rst b/docs/src/developers_guide/contributing_graphics_tests.rst new file mode 100644 index 0000000000..1e42c35ae6 --- /dev/null +++ b/docs/src/developers_guide/contributing_graphics_tests.rst @@ -0,0 +1,142 @@
+.. include:: ../common_links.inc
+
+.. _testing.graphics:
+
+Adding or Updating Graphics Tests
+=================================
+
+.. note::
+
+   If a large number of image tests are failing due to an update to the
+   libraries used for image hashing, follow the instructions on
+   :ref:`refresh-imagerepo`.
+
+Generating New Results
+----------------------
+
+When you find that a graphics test in the Iris testing suite has failed,
+following changes in Iris or the run dependencies, this is the process
+you should follow:
+
+#. Create a new, empty directory to store temporary image results, at the path
+   ``lib/iris/tests/result_image_comparison`` in your Iris repository checkout.
+
+#. Run the relevant (failing) tests directly as Python scripts, or using
+   ``pytest``.
+
+The results of the failing image tests will now be available in
+``lib/iris/tests/result_image_comparison``.
+
+.. note::
+
+   The ``result_image_comparison`` folder is covered by a project
+   ``.gitignore`` setting, so those files *will not show up* in a
+   ``git status`` check.
+
+Reviewing Failing Tests
+-----------------------
+
+#. Run ``iris/lib/iris/tests/graphics/idiff.py`` with Python, e.g.::
+
+       python idiff.py
+
+   This will open a window for you to visually inspect
+   side-by-side **old**, **new** and **difference** images for each failed
+   graphics test. Hit a button to either :guilabel:`accept`,
+   :guilabel:`reject` or :guilabel:`skip` each new result.
+
+   If the change is **accepted**:
+
+   * the imagehash value of the new result image is added into the relevant
+     set of 'valid result hashes' in the image result database file,
+     ``tests/results/imagerepo.json``
+
+   * the relevant output file in ``tests/result_image_comparison`` is renamed
+     according to the test name. A copy of this new PNG file must then be added
+     into the ``iris-test-data`` repository, at
+     https://github.com/SciTools/iris-test-data (See below).
+
+   If a change is **skipped**:
+
+   * no further changes are made in the repo.
+
+   * when you run ``iris/tests/idiff.py`` again, the skipped choice will be
+     presented again.
+
+   If a change is **rejected**:
+
+   * the output image is deleted from ``result_image_comparison``.
+
+   * when you run ``iris/tests/idiff.py`` again, the rejected result will not
+     appear, unless the relevant failing test is re-run.
+
+#. **Now re-run the tests**. The **new** result should now be recognised and the
+   relevant test should pass. However, some tests can perform *multiple*
+   graphics checks within a single test case function. In those cases, any
+   failing check will prevent the following ones from being run, so a test
+   re-run may encounter further (new) graphical test failures. If that
+   happens, simply repeat the check-and-accept process until all tests pass.
+
+#. You're now ready to :ref:`add-graphics-test-changes`.
+
+
+Adding a New Image Test
+-----------------------
+
+If you attempt to run ``idiff.py`` when there are new graphical tests for which
+no baseline yet exists, you will get a warning that ``idiff.py`` is ``Ignoring
+unregistered test result...``. In this case:
+
+#. rename the relevant images from ``iris/tests/result_image_comparison`` by
+
+   * removing the ``result-`` prefix
+
+   * fully qualifying the test name if it isn't already (i.e. it should start
+     ``iris.tests...`` or ``gallery_tests...``)
+
+#. run the tests in the mode that lets them create missing data (see
+   :ref:`create-missing`). This will update ``imagerepo.json`` with the new
+   test name and image hash.
+
+#. and then add them to the Iris test data as covered in
+   :ref:`add-graphics-test-changes`.
+
+
+.. _refresh-imagerepo:
+
+Refreshing the Stored Hashes
+----------------------------
+
+From time to time, a new version of the image hashing library will cause all
+image hashes to change. The image hashes stored in
+``tests/results/imagerepo.json`` can be refreshed using the baseline images
+stored in the ``iris-test-data`` repository (at
+https://github.com/SciTools/iris-test-data) using the script
+``tests/graphics/recreate_imagerepo.py``. Use the ``--help`` argument for the
+command line arguments.
+
+
+.. _add-graphics-test-changes:
+
+Add Your Changes to Iris
+------------------------
+
+To add your changes to Iris, you need to make two pull requests (PRs).
+
+#. The first PR is made in the ``iris-test-data`` repository, at
+   https://github.com/SciTools/iris-test-data.
+
+   * Add all the newly-generated reference PNG files into the
+     ``test_data/images`` directory. In your Iris repo, these files are to be found
+     in the temporary results folder ``iris/tests/result_image_comparison``.
+
+   * Create a PR proposing these changes, in the usual way.
+
+#. The second PR is the one that makes the changes you intend to the Iris_ repository.
+   The description box of this pull request should contain a reference to
+   the matching one in ``iris-test-data``.
+
+   * This PR should include updating the version of the test data in
+     ``.github/workflows/ci-tests.yml`` and
+     ``.github/workflows/ci-docs-tests.yml`` to the new version created by the
+     merging of your ``iris-test-data`` PR. diff --git a/docs/src/developers_guide/contributing_pull_request_checklist.rst b/docs/src/developers_guide/contributing_pull_request_checklist.rst new file mode 100644 index 0000000000..11d68ace46 --- /dev/null +++ b/docs/src/developers_guide/contributing_pull_request_checklist.rst @@ -0,0 +1,61 @@
+.. include:: ../common_links.inc
+
+.. _pr_check:
+
+Pull Request Checklist
+======================
+
+All pull requests will be reviewed by a core developer who will manage the
+process of merging. It is the responsibility of the contributor submitting a
+pull request to do their best to deliver a pull request which meets the
+requirements of the project it is submitted to.
+
+This checklist summarises criteria which will be checked before a pull request
+is merged. Before submitting a pull request, please consider the following:
+
+
+#. **Provide a helpful description** of the Pull Request. This should include:
+
+   * The aim of the change / the problem addressed / a link to the issue.
+   * How the change has been delivered.
+
+#. **Include a "What's New" entry**, if appropriate.
+   See :ref:`whats_new_contributions`.
+
+#. **Check all tests pass**. This includes existing tests and any new tests
+   added for any new functionality. For more information see
+   :ref:`developer_running_tests`.
+
+#. **Check all modified and new source files conform to the required**
+   :ref:`code_formatting`.
+
+#. **Check all new dependencies added to the** `requirements`_ **yaml
+   files.** If dependencies have been added, then new nox testing lockfiles
+   should be generated too; see :ref:`gha_test_env`.
+
+#. **Check the source documentation has been updated to explain all new or changed
+   features**. Note that we now use numpydoc docstrings; any touched code should
+   be updated to use this formatting. See :ref:`docstrings`.
+
+#. **Include code examples inside the docstrings where appropriate**. See
+   :ref:`contributing.documentation.testing`.
+
+#. **Check the documentation builds without warnings or errors**. See
+   :ref:`contributing.documentation.building`.
+
+#. **Check for any new dependencies in the** `readthedocs.yml`_ **file**. This
+   file is used to build the documentation that is served from
+   https://scitools-iris.readthedocs.io/en/latest/
+
+#. **Check for updates needed for supporting projects for test or example
+   data**. For example:
+
+   * `iris-test-data`_ is a github project containing all the data to support
+     the tests.
+   * `iris-sample-data`_ is a github project containing all the data to support
+     the gallery and examples.
+
+   If new files are required by tests or code examples, they must be added to
+   the appropriate supporting project via a suitable pull-request. This pull
+   request should be referenced in the main Iris pull request and must be
+   accepted and merged before the Iris one can be. diff --git a/docs/src/developers_guide/contributing_running_tests.rst b/docs/src/developers_guide/contributing_running_tests.rst new file mode 100644 index 0000000000..f60cedba05 --- /dev/null +++ b/docs/src/developers_guide/contributing_running_tests.rst @@ -0,0 +1,197 @@
+.. include:: ../common_links.inc
+
+.. _developer_running_tests:
+
+Running the Tests
+*****************
+
+There are two options for running the tests:
+
+* Use an environment you created yourself. This requires more manual steps to
+  set up, but gives you more flexibility. For example, you can run a subset of
+  the tests or use ``python`` interactively to investigate any issues. See
+  :ref:`test manual env`.
+
+* Use ``nox``. This will automatically generate an environment and run test
+  sessions consistent with our GitHub continuous integration. See :ref:`using nox`.
+
+.. _test manual env:
+
+Testing Iris in a Manually Created Environment
+==============================================
+
+To create a suitable environment for running the tests, see :ref:`installing_from_source`.
+
+Many Iris tests use data defined in the test itself; however, this is not
+always the case, as sometimes example files are used. Due to
+the size of some of the files used, these are not kept in the Iris repository.
+A separate repository under the `SciTools`_ organisation is used, see
+https://github.com/SciTools/iris-test-data.
+
+In order to run the tests with **all** the test data you must clone the
+``iris-test-data`` repository and then ensure the Iris tests can access
+``iris-test-data/test_data``, using one of two methods:
+
+* Store the path in a shell environment variable named **OVERRIDE_TEST_DATA_REPOSITORY**.
+* Store the path in ``lib/iris/etc/site.cfg`` (see :mod:`iris.config` for more).
+
+The example command below uses ``~/projects`` as the parent directory::
+
+    cd ~/projects
+    git clone git@github.com:SciTools/iris-test-data.git
+    export OVERRIDE_TEST_DATA_REPOSITORY=~/projects/iris-test-data/test_data
+
+All the Iris tests may be run from the root ``iris`` project directory using
+``pytest``.
+For example::
+
+    pytest -n 2
+
+will run the tests across two processes. For more options, use the command
+``pytest -h``. Below is a trimmed example of the output::
+
+    ============================= test session starts ==============================
+    platform linux -- Python 3.10.5, pytest-7.1.2, pluggy-1.0.0
+    rootdir: /path/to/git/clone/iris, configfile: pyproject.toml, testpaths: lib/iris
+    plugins: xdist-2.5.0, forked-1.4.0
+    gw0 I / gw1 I
+    gw0 [6361] / gw1 [6361]
+
+    ........................................................................ [  1%]
+    ........................................................................ [  2%]
+    ........................................................................ [  3%]
+    ...
+    .......................ssssssssssssssssss............................... [ 99%]
+    ........................                                                 [100%]
+    =============================== warnings summary ===============================
+    ...
+    -- Docs: https://docs.pytest.org/en/stable/how-to/capture-warnings.html
+    =========================== short test summary info ============================
+    SKIPPED [1] lib/iris/tests/experimental/test_raster.py:152: Test requires 'gdal'.
+    SKIPPED [1] lib/iris/tests/experimental/test_raster.py:155: Test requires 'gdal'.
+    ...
+    ========= 6340 passed, 21 skipped, 1659 warnings in 193.57s (0:03:13) ==========
+
+There may be some tests that have been **skipped**. This is due to a Python
+decorator being present in the test script that will intentionally skip a test
+if a certain condition is not met. In the example output above there are
+**21** skipped tests. At the point in time when this was run, this was due to an
+experimental dependency not being present.
+
+.. tip::
+
+   The most common reason for tests to be skipped is when the directory for the
+   ``iris-test-data`` has not been set, which shows output such as::
+
+       SKIPPED [1] lib/iris/tests/unit/fileformats/test_rules.py:157: Test(s) require external data.
+       SKIPPED [1] lib/iris/tests/unit/fileformats/pp/test__interpret_field.py:97: Test(s) require external data.
+       SKIPPED [1] lib/iris/tests/unit/util/test_demote_dim_coord_to_aux_coord.py:29: Test(s) require external data.
+
+   All Python decorators that skip tests will be defined in
+   ``lib/iris/tests/__init__.py`` with a function name with a prefix of
+   ``skip_`` (see the sketch below).
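+
+For illustration, such a decorator is applied directly to the test (a
+minimal sketch; the test class and method names here are hypothetical):
+
+.. code-block:: python
+
+    import iris.tests as tests
+
+
+    class TestSomething(tests.IrisTest):
+        @tests.skip_data
+        def test_using_external_data(self):
+            # Only runs when the iris-test-data directory is available.
+            ...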
+
+You can also run a specific test module. The example below runs the tests for
+mapping::
+
+    cd lib/iris/tests
+    python test_mapping.py
+
+When running the test directly as above you can view the command line options
+using the commands ``python test_mapping.py -h`` or
+``python test_mapping.py --help``.
+
+.. tip:: A useful command line option to use is ``-d``. This will display
+   matplotlib_ figures as the tests are run. For example::
+
+       python test_mapping.py -d
+
+.. _using nox:
+
+Using Nox for Testing Iris
+==========================
+
+The `nox`_ tool has been adopted for automated testing on `Iris GitHub Actions`_
+and also locally on the command-line for developers.
+
+`nox`_ is similar to `tox`_, but instead leverages the expressiveness and power of a Python
+configuration file rather than an `.ini` style file. As with `tox`_, `nox`_ can use `virtualenv`_
+to create isolated Python environments, but in addition also supports `conda`_ as a testing
+environment backend.
+
+
+Where is Nox Used?
+------------------
+
+Iris uses `nox`_ as a convenience to fully automate the process of executing the Iris tests, but also
+automates the process of:
+
+* building the documentation and executing the doc-tests
+* building the documentation gallery
+* running the documentation URL link check
+
+You can perform all of these tasks manually yourself; however, the onus is on you to first ensure
+that all of the required package dependencies are installed and available in the testing environment.
+
+`Nox`_ has been configured to automatically do this for you, and provides a means to easily replicate
+the remote testing behaviour of `Iris GitHub Actions`_ locally for the developer.
+
+
+Installing Nox
+--------------
+
+We recommend installing `nox`_ using `conda`_. To install `nox`_ in a separate `conda`_ environment::
+
+    conda create -n nox -c conda-forge nox
+    conda activate nox
+
+To install `nox`_ in an existing active `conda`_ environment::
+
+    conda install -c conda-forge nox
+
+The `nox`_ package is also available on PyPI, however `nox`_ has been configured to use the `conda`_
+backend for Iris, so an installation of `conda`_ must always be available.
+
+
+Testing with Nox
+----------------
+
+The `nox`_ configuration file `noxfile.py` is available in the root ``iris`` project directory, and
+defines all the `nox`_ sessions (i.e., tasks) that may be performed. `nox`_ must always be executed
+from the ``iris`` root directory.
+
+To list the configured `nox`_ sessions for Iris::
+
+    nox --list
+
+To run the Iris tests for all configured versions of Python::
+
+    nox --session tests
+
+To build the Iris documentation specifically for Python 3.7::
+
+    nox --session doctest-3.7
+
+To run all the Iris `nox`_ sessions::
+
+    nox
+
+For further `nox`_ command-line options::
+
+    nox --help
+
+.. tip::
+   For `nox`_ sessions that use the `conda`_ backend, you can use the ``-v`` or ``--verbose``
+   flag to display the `nox`_ `conda`_ environment package details and environment info.
+   For example::
+
+       nox --session tests -- --verbose
+
+
+.. note:: `nox`_ will cache its testing environments in the ``.nox`` directory within the
+   root ``iris`` project directory.
+
+
+.. _setuptools: https://setuptools.readthedocs.io/en/latest/
+.. _tox: https://tox.readthedocs.io/en/latest/
+.. _virtualenv: https://virtualenv.pypa.io/en/latest/
+.. _PyPI: https://pypi.org/project/nox/
+.. _v41.5.0: https://setuptools.readthedocs.io/en/latest/history.html#v41-5-0 diff --git a/docs/src/developers_guide/contributing_testing.rst b/docs/src/developers_guide/contributing_testing.rst new file mode 100644 index 0000000000..a65bcebd55 --- /dev/null +++ b/docs/src/developers_guide/contributing_testing.rst @@ -0,0 +1,147 @@
+.. include:: ../common_links.inc
+
+.. _developer_test_categories:
+
+
+Test Categories
+***************
+
+There are two main categories of tests within Iris:
+
+- :ref:`testing.unit_test`
+- :ref:`testing.integration`
+
+Ideally, all code changes should be accompanied by one or more unit
+tests, and by zero or more integration tests.
+
+But if in any doubt about what tests to add or how to write them, please
+feel free to submit a pull-request in any state and ask for assistance.
+
+
+.. _testing.unit_test:
+
+Unit Tests
+==========
+
+Code changes should be accompanied by enough unit tests to give a
+high degree of confidence that the change works as expected. In
+addition, the unit tests can help describe the intent behind a change.
+
+The docstring for each test module must state the unit under test.
+For example:
+
+    :literal:`"""Unit tests for the \`iris.experimental.raster.export_geotiff\` function."""`
+
+All unit tests must be placed and named according to the following
+structure:
+
+
+.. _testing.classes:
+
+Classes
+-------
+
+When testing a class, all the tests must reside in the module:
+
+    :literal:`lib/iris/tests/unit/<fully/qualified/module>/test_<ClassName>.py`
+
+Within this test module each tested method must have one or more
+corresponding test classes, for example:
+
+* ``Test_<name of public method>``
+* ``Test_<name of public method>__<aspect of method>``
+
+And within those test classes, the test methods must be named according
+to the aspect of the tested method which they address.
+
+**Examples**:
+
+All unit tests for :py:class:`iris.cube.Cube` must reside in:
+
+    :literal:`lib/iris/tests/unit/cube/test_Cube.py`
+
+Within that file the tests might look something like:
+
+.. code-block:: python
+
+    # Tests for the Cube.xml() method.
+    class Test_xml(tests.IrisTest):
+        def test_some_general_stuff(self):
+            ...
+
+
+    # Tests for the Cube.xml() method, focussing on the behaviour of
+    # the checksums.
+    class Test_xml__checksum(tests.IrisTest):
+        def test_checksum_ignores_masked_values(self):
+            ...
+
+
+    # Tests for the Cube.add_dim_coord() method.
+    class Test_add_dim_coord(tests.IrisTest):
+        def test_normal_usage(self):
+            ...
+
+        def test_coord_already_present(self):
+            ...
+
+
+.. _testing.functions:
+
+Functions
+---------
+
+When testing a function, all the tests must reside in the module:
+
+    :literal:`lib/iris/tests/unit/<fully/qualified/module>/test_<function_name>.py`
+
+Within this test module there must be one or more test classes, for example:
+
+* ``Test``
+* ``TestAspectOfFunction``
+
+And within those test classes, the test methods must be named according
+to the aspect of the tested function which they address.
+
+**Examples**:
+
+All unit tests for :py:func:`iris.experimental.raster.export_geotiff`
+must reside in:
+
+    :literal:`lib/iris/tests/unit/experimental/raster/test_export_geotiff.py`
+
+Within that file the tests might look something like:
+
+.. code-block:: python
+
+    # Tests focussing on the handling of different data types.
+    class TestDtypeAndValues(tests.IrisTest):
+        def test_int16(self):
+            ...
+
+        def test_int16_big_endian(self):
+            ...
+
+
+    # Tests focussing on the handling of different projections.
+    class TestProjection(tests.IrisTest):
+        def test_no_ellipsoid(self):
+            ...
+
+
+.. _testing.integration:
+
+Integration Tests
+=================
+
+Some code changes may require tests which exercise several units in
+order to demonstrate an important consequence of their interaction which
+may not be apparent when considering the units in isolation.
+
+These tests must be placed in the ``lib/iris/tests/integration`` folder.
+Unlike unit tests, there is no fixed naming scheme for integration
+tests. But folders and files must be created as required to help
+developers locate relevant tests. It is recommended they are named
+according to the capabilities under test, e.g.
+``metadata/test_pp_preservation.py``, and not named according to the
+module(s) under test.
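+
+For illustration, an integration test module might contain something like the
+following sketch (the names here are hypothetical):
+
+.. code-block:: python
+
+    # lib/iris/tests/integration/metadata/test_pp_preservation.py
+    import iris.tests as tests
+
+
+    class TestPPMetadataPreservation(tests.IrisTest):
+        def test_standard_name_round_trip(self):
+            # Save a cube to PP then load it back, checking that the
+            # metadata survives the round trip.
+            ...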
diff --git a/docs/src/developers_guide/contributing_testing_index.rst b/docs/src/developers_guide/contributing_testing_index.rst new file mode 100644 index 0000000000..2f5ae411e8 --- /dev/null +++ b/docs/src/developers_guide/contributing_testing_index.rst @@ -0,0 +1,14 @@
+.. _testing:
+
+Testing
+=======
+
+.. toctree::
+   :maxdepth: 3
+
+   contributing_testing
+   testing_tools
+   contributing_graphics_tests
+   contributing_running_tests
+   contributing_ci_tests
+   contributing_benchmarks diff --git a/docs/src/developers_guide/documenting/__init__.py b/docs/src/developers_guide/documenting/__init__.py new file mode 100644 index 0000000000..6e031999e7 --- /dev/null +++ b/docs/src/developers_guide/documenting/__init__.py @@ -0,0 +1 @@
+# noqa: D104 diff --git a/docs/src/developers_guide/documenting/docstrings.rst b/docs/src/developers_guide/documenting/docstrings.rst new file mode 100644 index 0000000000..86f2c839c1 --- /dev/null +++ b/docs/src/developers_guide/documenting/docstrings.rst @@ -0,0 +1,34 @@
+.. _docstrings:
+
+==========
+Docstrings
+==========
+
+Every public object in the Iris package should have an appropriate docstring.
+This is important as the docstrings are used by developers to understand
+the code and may be read directly in the source or via the
+:doc:`../../generated/api/iris`.
+
+.. note::
+   As of April 2022 we are looking to adopt `numpydoc`_ strings as standard.
+   We aim to complete the adoption over time as we make changes to the codebase.
+   For examples of use see `numpydoc`_ and `sphinxcontrib-napoleon`_.
+
+For consistency always use:
+
+* ``"""triple double quotes"""`` around docstrings.
+* ``r"""raw triple double quotes"""`` if you use any backslashes in your
+  docstrings.
+* ``u"""Unicode triple-quoted string"""`` for Unicode docstrings.
+
+All docstrings can use reST (reStructuredText) markup to augment the
+rendered formatting. See the :ref:`reST_quick_start` for more detail.
+
+For more information, including examples, please see:
+
+* `numpydoc`_
+* `sphinxcontrib-napoleon`_
+
+
+.. _numpydoc: https://numpydoc.readthedocs.io/en/latest/format.html#style-guide
+.. _sphinxcontrib-napoleon: https://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_numpy.html
\ No newline at end of file diff --git a/docs/src/developers_guide/documenting/docstrings_attribute.py b/docs/src/developers_guide/documenting/docstrings_attribute.py new file mode 100644 index 0000000000..1714373a62 --- /dev/null +++ b/docs/src/developers_guide/documenting/docstrings_attribute.py @@ -0,0 +1,38 @@
+"""Docstring attribute example."""
+
+
+class ExampleClass:
+    """Class Summary."""
+
+    def __init__(self, arg1, arg2):
+        """Purpose section description.
+
+        Description section text.
+
+        Parameters
+        ----------
+        arg1 : int
+            First argument description.
+        arg2 : float
+            Second argument description.
+
+        Returns
+        -------
+        bool
+
+        """
+        self.a = arg1
+        "Attribute arg1 docstring."
+        self.b = arg2
+        "Attribute arg2 docstring."
+
+    @property
+    def square(self):
+        """*(read-only)* Purpose section description.
+
+        Returns
+        -------
+        int
+
+        """
+        return self.a * self.a diff --git a/docs/src/developers_guide/documenting/docstrings_sample_routine.py b/docs/src/developers_guide/documenting/docstrings_sample_routine.py new file mode 100644 index 0000000000..7feec6dbd0 --- /dev/null +++ b/docs/src/developers_guide/documenting/docstrings_sample_routine.py @@ -0,0 +1,27 @@
+"""Docstring routine example."""
+
+
+def sample_routine(arg1, arg2, kwarg1="foo", kwarg2=None):
+    """Purpose section text goes here.
+
+    Description section longer text goes here.
+
+    Parameters
+    ----------
+    arg1 : numpy.ndarray
+        First argument description.
+    arg2 : numpy.ndarray
+        Second argument description.
+    kwarg1 : str, optional
+        The first keyword argument. This argument description
+        can be multi-lined.
+    kwarg2 : bool, optional
+        The second keyword argument.
+
+    Returns
+    -------
+    numpy.ndarray
+        A numpy.ndarray of arg1 * arg2.
+
+    """
+    pass diff --git a/docs/src/developers_guide/documenting/rest_guide.rst b/docs/src/developers_guide/documenting/rest_guide.rst new file mode 100644 index 0000000000..9e8c1107b0 --- /dev/null +++ b/docs/src/developers_guide/documenting/rest_guide.rst @@ -0,0 +1,44 @@
+.. include:: ../../common_links.inc
+
+.. _reST_quick_start:
+
+================
+reST Quick Start
+================
+
+`reST`_ is used to create the documentation for Iris_. It is used to author
+all of the documentation content, including use in docstrings where appropriate.
+For more information see :ref:`docstrings`.
+
+reST is a lightweight markup language intended to be highly readable in
+source format. This guide will cover some of the more frequently used advanced
+reST markup syntaxes; for the basics of reST, the following links may be useful:
+
+* https://www.sphinx-doc.org/en/master/usage/restructuredtext/
+* https://packages.python.org/an_example_pypi_project/sphinx.html
+
+Reference documentation for reST can be found at https://docutils.sourceforge.net/rst.html.
+
+Creating Links
+--------------
+Basic links can be created with ```Text of the link <https://example.com>`_``
+which will look like `Text of the link <https://example.com>`_.
+
+
+Documents in the same project can be cross referenced with the syntax
+``:doc:`document_name```. For example, to reference the "docstrings" page,
+``:doc:`docstrings``` creates the following link: :doc:`docstrings`
+
+
+References can be created between sections by first making a "label" where
+you would like the link to point to, ``.. _name_of_reference:``; the
+appropriate link can now be created with ``:ref:`name_of_reference```
+(note the leading underscore on the label).
+
+
+Cross referencing other reference documentation can be achieved with the
+syntax ``:py:class:`zipfile.ZipFile``` which will result in links such as
+:py:class:`zipfile.ZipFile` and :py:class:`numpy.ndarray`.
+
+
+.. _reST: https://en.wikipedia.org/wiki/ReStructuredText diff --git a/docs/src/developers_guide/documenting/whats_new_contributions.rst b/docs/src/developers_guide/documenting/whats_new_contributions.rst new file mode 100644 index 0000000000..82569e57a0 --- /dev/null +++ b/docs/src/developers_guide/documenting/whats_new_contributions.rst @@ -0,0 +1,145 @@
+.. include:: ../../common_links.inc
+
+.. _whats_new_contributions:
+
+=================================
+Contributing a "What's New" Entry
+=================================
+
+Iris uses a file named ``latest.rst`` to keep a draft of upcoming development
+changes that will form the next stable release. Contributions to the
+:ref:`iris_whatsnew` document are written by the developer most familiar
+with the change made. The contribution should be included as part of
+the Iris Pull Request that introduces the change.
+
+The ``latest.rst`` and the past release notes are kept in the
+``docs/src/whatsnew/`` directory. If you are writing the first contribution after
+an Iris release: **create the new** ``latest.rst`` by copying the content from
+``latest.rst.template`` in the same directory.
+
+Since the `Contribution categories`_ include Internal changes, **all** Iris
+Pull Requests should be accompanied by a "What's New" contribution.
+
+
+Git Conflicts
+=============
+
+If changes to ``latest.rst`` are being suggested in several simultaneous
+Iris Pull Requests, Git will likely encounter merge conflicts.
+If this situation is thought likely (large PR, high repo activity etc.):
+
+* PR author: Do not include a "What's New" entry. Mention in the PR text that a
+  "What's New" entry is pending.
+
+* PR reviewer: Review the PR as normal. Once the PR is acceptable, ask that
+  a **new pull request** be created specifically for the "What's New" entry,
+  which references the main pull request and is titled (e.g. for PR#9999):
+
+      What's New for #9999
+
+* PR author: create the "What's New" pull request.
+
+* PR reviewer: once the "What's New" PR is created, **merge the main PR**.
+  (This will fix any `Iris GitHub Actions`_ linkcheck errors where the links in the
+  "What's New" PR reference new features introduced in the main PR.)
+
+* PR reviewer: review the "What's New" PR, merge once acceptable.
+
+These measures should mean the suggested ``latest.rst`` changes are outstanding
+for the minimum time, minimising conflicts and minimising the need to rebase or
+merge from trunk.
+
+
+Writing a Contribution
+======================
+
+A contribution is the short description of a change introduced to Iris
+which improves it in some way. As such, a single Iris Pull Request may
+contain multiple changes that are worth highlighting as contributions to the
+what's new document.
+
+The appropriate contribution for a pull request might in fact be an addition or
+change to an existing "What's New" entry.
+
+Each contribution will ideally be written as a single concise entry using a
+reStructuredText auto-enumerated list ``#.`` directive. Where possible, do not
+exceed **column 80** and ensure that any subsequent lines of the same entry are
+aligned with the first. The content should target an Iris user as the audience.
+The required content, in order, is as follows:
+
+* Use your discretion to decide on the names of all those that you want to
+  acknowledge as part of your contribution. Also consider the efforts of the
+  reviewer. Please use GitHub user names that link to their GitHub profile
+  e.g.,
+
+      ```@tkknight`_ Lorem ipsum dolor sit amet ...``
+
+  Also add a full reference in the following section at the end of the ``latest.rst``::
+
+      .. comment
+          Whatsnew author names (@github name) in alphabetical order. Note that,
+          core dev names are automatically included by the common_links.inc:
+
+      .. _@tkknight: https://github.com/tkknight
+
+* A succinct summary of the new/changed behaviour.
+
+* Context to the change. Possible examples include: what this fixes, why
+  something was added, issue references (e.g. ``:issue:`9999```), more specific
+  detail on the change itself.
+
+* Pull request references, bracketed, following the final period e.g.,
+  ``(:pull:`1111`, :pull:`9999`)``
+
+* A trailing blank line (standard reStructuredText list format).
+
+For example::
+
+    #. `@tkknight <https://github.com/tkknight>`_ and
+       `@trexfeathers <https://github.com/trexfeathers>`_ (reviewer) changed
+       argument ``x`` to be optional in :class:`~iris.module.class` and
+       :meth:`iris.module.method`. This allows greater flexibility as requested in
+       :issue:`9999`. (:pull:`1111`, :pull:`9999`)
+
+
+The above example also demonstrates some of the possible syntax for including
+links to code. For more inspiration on possible content and references, please
+examine past :ref:`iris_whatsnew` entries.
+
+.. note:: The reStructuredText syntax will be checked as part of building
+   the documentation. Any warnings should be corrected.
The + `Iris GitHub Actions`_ will automatically build the documentation when + creating a pull request, however you can also manually + :ref:`build ` the documentation. + + +Contribution Categories +======================= + +The structure of the what's new release note should be easy to read by +users. To achieve this several categories may be used. + +**📢 Announcements** + General news and announcements to the Iris community. + +**✨ Features** + Features that are new or changed to add functionality. + +**🐛 Bug Fixes** + A bug fix. + +**💣 Incompatible Changes** + A change that causes an incompatibility with prior versions of Iris. + +**🔥 Deprecations** + Deprecations of functionality. + +**🔗 Dependencies** + Additions, removals and version changes in Iris' package dependencies. + +**📚 Documentation** + Changes to documentation. + +**💼 Internal** + Changes to any internal or development related topics, such as testing, + environment dependencies etc. diff --git a/docs/src/developers_guide/edit_button.png b/docs/src/developers_guide/edit_button.png new file mode 100755 index 0000000000..ee2e7858f0 Binary files /dev/null and b/docs/src/developers_guide/edit_button.png differ diff --git a/docs/src/developers_guide/edit_on_github.png b/docs/src/developers_guide/edit_on_github.png new file mode 100755 index 0000000000..f802ebd9d6 Binary files /dev/null and b/docs/src/developers_guide/edit_on_github.png differ diff --git a/docs/src/developers_guide/find_main.png b/docs/src/developers_guide/find_main.png new file mode 100755 index 0000000000..8a7af306cd Binary files /dev/null and b/docs/src/developers_guide/find_main.png differ diff --git a/docs/src/developers_guide/fork_banner.png b/docs/src/developers_guide/fork_banner.png new file mode 100755 index 0000000000..0d140c9fc6 Binary files /dev/null and b/docs/src/developers_guide/fork_banner.png differ diff --git a/docs/src/developers_guide/github_app.rst b/docs/src/developers_guide/github_app.rst new file mode 100644 index 0000000000..402cfe0c75 --- /dev/null +++ b/docs/src/developers_guide/github_app.rst @@ -0,0 +1,281 @@ +.. include:: ../common_links.inc + +Token GitHub App +---------------- + +.. note:: + + This section of the documentation is applicable only to GitHub `SciTools`_ + Organisation **owners** and **administrators**. + +.. note:: + + The ``iris-actions`` GitHub App has been rebranded with the more generic + name ``scitools-ci``, as the app can be used for any `SciTools`_ repository, + not just ``iris`` specifically. + + All of the following instructions are still applicable. + + +This section describes how to create, configure, install and use our `SciTools`_ +GitHub App for generating tokens for use with *GitHub Actions* (GHA). + + +Background +^^^^^^^^^^ + +Our GitHub *Continuous Integration* (CI) workflows require fully reproducible +`conda`_ environments to test ``iris`` and build our documentation. + +The ``iris`` `refresh-lockfiles`_ GHA workflow uses the `conda-lock`_ package to routinely +generate a platform specific ``lockfile`` containing all the package dependencies +required by ``iris`` for a specific version of ``python``. + +The environment lockfiles created by the `refresh-lockfiles`_ GHA are contributed +back to ``iris`` though a pull-request that is automatically generated using the +third-party `create-pull-request`_ GHA. By default, pull-requests created by such an +action using the standard ``GITHUB_TOKEN`` **cannot** trigger other workflows, such +as our CI. 
As a result, we use a dedicated authentication **GitHub App** to securely generate
tokens for the `create-pull-request`_ GHA, which then permits our full suite of CI
testing workflows to be triggered against the lockfiles pull request. Ensuring that
the CI is triggered gives us confidence that the proposed new lockfiles have not
introduced a package level incompatibility or issue within ``iris``.
See :ref:`use gha`.


Create GitHub App
^^^^^^^^^^^^^^^^^

The **GitHub App** is created for the sole purpose of generating tokens for use
with actions, and **must** be owned by the `SciTools`_ organisation.

To create a minimal `GitHub App`_ for this purpose, perform the following steps:

1. Click the `SciTools`_ organisation ``⚙️ Settings`` option.

.. figure:: assets/scitools-settings.png
   :alt: SciTools organisation Settings option
   :align: center
   :width: 75%

2. Click the ``GitHub Apps`` option from the ``<> Developer settings``
   section in the left hand sidebar.

.. figure:: assets/developer-settings-github-apps.png
   :alt: Developer settings, GitHub Apps option
   :align: center
   :width: 25%

3. Now click the ``New GitHub App`` button to display the ``Register new GitHub App``
   form.

Within the ``Register new GitHub App`` form, complete the following fields:

4. Set the **mandatory** ``GitHub App name`` field to be ``iris-actions``.
5. Set the **mandatory** ``Homepage URL`` field to be ``https://github.com/SciTools/iris``.
6. Under the ``Webhook`` section, **uncheck** the ``Active`` checkbox.
   Note that **no** ``Webhook URL`` is required.

.. figure:: assets/webhook-active.png
   :alt: Webhook active checkbox
   :align: center
   :width: 75%

7. Under the ``Repository permissions`` section, set the ``Contents`` field to
   be ``Access: Read and write``.

.. figure:: assets/repo-perms-contents.png
   :alt: Repository permissions Contents option
   :align: center
   :width: 75%

8. Under the ``Repository permissions`` section, set the ``Pull requests`` field
   to be ``Access: Read and write``.

.. figure:: assets/repo-perms-pull-requests.png
   :alt: Repository permissions Pull requests option
   :align: center
   :width: 75%

9. Under the ``Organization permissions`` section, set the ``Members`` field to
   be ``Access: Read-only``.

.. figure:: assets/org-perms-members.png
   :alt: Organization permissions Members
   :align: center
   :width: 75%

10. Under the ``User permissions`` section, for the ``Where can this GitHub App be installed?``
    field, **check** the ``Only on this account`` radio-button, i.e., only allow
    this GitHub App to be installed on the **SciTools** account.

.. figure:: assets/user-perms.png
   :alt: User permissions
   :align: center
   :width: 75%

11. Finally, click the ``Create GitHub App`` button.


Configure GitHub App
^^^^^^^^^^^^^^^^^^^^

Creating the GitHub App will automatically redirect you to the ``SciTools settings / iris-actions``
form for the newly created app.

Perform the following GitHub App configuration steps:

.. _app id:

1. Under the ``About`` section, make a note of the GitHub ``App ID``, as this
   value is required later. See :ref:`gha secrets`.
2. Under the ``Display information`` section, optionally upload the ``iris`` logo
   as a ``png`` image.
3. Under the ``Private keys`` section, click the ``Generate a private key`` button.

.. figure:: assets/generate-key.png
   :alt: Private keys Generate a private key
   :align: center
   :width: 75%

..
_private key:

GitHub will automatically generate a private key to sign access token requests
for the app. Also, a separate browser pop-up window will appear with the GitHub
App private key in ``OpenSSL PEM`` format.

.. figure:: assets/download-pem.png
   :alt: Download OpenSSL PEM file
   :align: center
   :width: 50%

.. important::

    Please ensure that you save the ``OpenSSL PEM`` file and **securely** archive
    its contents. The private key within this file is required later.
    See :ref:`gha secrets`.


Install GitHub App
^^^^^^^^^^^^^^^^^^

To install the GitHub App:

1. Select the ``Install App`` option from the top left menu of the
   ``Scitools settings / iris-actions`` form, then click the ``Install`` button.

.. figure:: assets/install-app.png
   :alt: Install App option
   :align: center
   :width: 75%

2. Select the ``Only select repositories`` radio-button from the ``Install iris-actions``
   form, and choose the ``SciTools/iris`` repository.

.. figure:: assets/install-iris-actions.png
   :alt: Install iris-actions GitHub App
   :align: center
   :width: 75%

3. Click the ``Install`` button.

   The successfully installed ``iris-actions`` GitHub App is now available under
   the ``GitHub Apps`` option in the ``Integrations`` section of the `SciTools`_
   organisation ``Settings``. Note that, to reconfigure the installed app, click
   the ``⚙️ App settings`` option.

.. figure:: assets/installed-app.png
   :alt: Installed GitHub App
   :align: center
   :width: 80%

4. Finally, confirm that the ``iris-actions`` GitHub App is now available within
   the `SciTools/iris`_ repository by clicking the ``GitHub apps`` option in the
   ``⚙️ Settings`` section.

.. figure:: assets/iris-github-apps.png
   :alt: Iris installed GitHub App
   :align: center
   :width: 80%


.. _gha secrets:

Create Repository Secrets
^^^^^^^^^^^^^^^^^^^^^^^^^

The GitHub Action that requests an access token from the ``iris-actions``
GitHub App must be configured with the following information:

* the ``App ID``, and
* the ``OpenSSL PEM`` private key

associated with the ``iris-actions`` GitHub App. This **sensitive** information is
made **securely** available by creating `SciTools/iris`_ repository secrets:

1. Click the `SciTools/iris`_ repository ``⚙️ Settings`` option.

.. figure:: assets/iris-settings.png
   :alt: Iris Settings
   :align: center
   :width: 75%

2. Click the ``Actions`` option from the ``Security`` section in the left hand
   sidebar.

.. figure:: assets/iris-security-actions.png
   :alt: Iris Settings Security Actions
   :align: center
   :width: 25%

3. Click the ``New repository secret`` button.

.. figure:: assets/iris-actions-secret.png
   :alt: Iris Actions Secret
   :align: center
   :width: 75%

4. Complete the ``Actions secrets / New secret`` form for the ``App ID``:

   * Set the ``Name`` field to be ``AUTH_APP_ID``.
   * Set the ``Value`` field to be the numerical ``iris-actions`` GitHub ``App ID``.
     See :ref:`here <app id>`.
   * Click the ``Add secret`` button.

5. Click the ``New repository secret`` button again, and complete the form
   for the ``OpenSSL PEM``:

   * Set the ``Name`` field to be ``AUTH_APP_PRIVATE_KEY``.
   * Set the ``Value`` field to be the entire contents of the ``OpenSSL PEM`` file.
     See :ref:`here <private key>`.
   * Click the ``Add secret`` button.

A summary of the newly created `SciTools/iris`_ repository secrets is now available:

..
figure:: assets/iris-secrets-created.png + :alt: Iris Secrets created + :align: center + :width: 75% + + +.. _use gha: + +Use GitHub App +^^^^^^^^^^^^^^ + +The following example workflow shows how to use the `github-app-token`_ GHA +to generate a token for use with the `create-pull-request`_ GHA: + +.. figure:: assets/gha-token-example.png + :alt: GitHub Action token example + :align: center + :width: 50% + + +.. _GitHub App: https://docs.github.com/en/developers/apps/building-github-apps/creating-a-github-app +.. _SciTools/iris: https://github.com/SciTools/iris +.. _conda-lock: https://github.com/conda-incubator/conda-lock +.. _create-pull-request: https://github.com/peter-evans/create-pull-request +.. _github-app-token: https://github.com/tibdex/github-app-token +.. _refresh-lockfiles: https://github.com/SciTools/iris/blob/main/.github/workflows/refresh-lockfiles.yml diff --git a/docs/src/developers_guide/gitwash/LICENSE b/docs/src/developers_guide/gitwash/LICENSE new file mode 100644 index 0000000000..cd8441c161 --- /dev/null +++ b/docs/src/developers_guide/gitwash/LICENSE @@ -0,0 +1,34 @@ +========= + LICENSE +========= + +We release the documents under the Creative Commons attribution license: +https://creativecommons.org/licenses/by/3.0/ + +We release the code under the simplified BSD license: + +Copyright (c) 2010, Matthew Brett +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +* Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS +IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED +TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED +TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/docs/src/developers_guide/gitwash/branch_dropdown.png b/docs/src/developers_guide/gitwash/branch_dropdown.png new file mode 100644 index 0000000000..6d74f3d643 Binary files /dev/null and b/docs/src/developers_guide/gitwash/branch_dropdown.png differ diff --git a/docs/iris/src/developers_guide/gitwash/configure_git.rst b/docs/src/developers_guide/gitwash/configure_git.rst similarity index 88% rename from docs/iris/src/developers_guide/gitwash/configure_git.rst rename to docs/src/developers_guide/gitwash/configure_git.rst index 0e18b666d0..564ae51820 100644 --- a/docs/iris/src/developers_guide/gitwash/configure_git.rst +++ b/docs/src/developers_guide/gitwash/configure_git.rst @@ -1,8 +1,10 @@ +.. include:: links.inc + .. _configure-git: -=============== - Configure git -=============== +============= +Configure Git +============= .. 
_git-config-basic: @@ -49,13 +51,13 @@ command:: To set up on another computer, you can copy your ``~/.gitconfig`` file, or run the commands above. -In detail +In Detail ========= user.name and user.email ------------------------ -It is good practice to tell git_ who you are, for labeling any changes +It is good practice to tell git_ who you are, for labelling any changes you make to the code. The simplest way to do this is from the command line:: @@ -122,7 +124,7 @@ Or from the command line:: .. _fancy-log: -Fancy log output +Fancy Log Output ---------------- This is a very nice alias to get a fancy log output; it should go in the @@ -137,22 +139,18 @@ You use the alias with:: and it gives graph / text output something like this (but with color!):: * 6d8e1ee - (HEAD, origin/my-fancy-feature, my-fancy-feature) NF - a fancy file (45 minutes ago) [Matthew Brett] - * d304a73 - (origin/placeholder, placeholder) Merge pull request #48 from hhuuggoo/master (2 weeks ago) [Jonathan Terhorst] - |\ + * d304a73 - (origin/placeholder, placeholder) Merge pull request #48 from hhuuggoo/main (2 weeks ago) [Jonathan Terhorst] + |\ | * 4aff2a8 - fixed bug 35, and added a test in test_bugfixes (2 weeks ago) [Hugo] - |/ + |/ * a7ff2e5 - Added notes on discussion/proposal made during Data Array Summit. (2 weeks ago) [Corran Webster] - * 68f6752 - Initial implimentation of AxisIndexer - uses 'index_by' which needs to be changed to a call on an Axes object - this is all very sketchy right now. (2 weeks ago) [Corr - * 376adbd - Merge pull request #46 from terhorst/master (2 weeks ago) [Jonathan Terhorst] - |\ + * 68f6752 - Initial implementation of AxisIndexer - uses 'index_by' which needs to be changed to a call on an Axes object - this is all very sketchy right now. (2 weeks ago) [Corr + * 376adbd - Merge pull request #46 from terhorst/main (2 weeks ago) [Jonathan Terhorst] + |\ | * b605216 - updated joshu example to current api (3 weeks ago) [Jonathan Terhorst] | * 2e991e8 - add testing for outer ufunc (3 weeks ago) [Jonathan Terhorst] | * 7beda5a - prevent axis from throwing an exception if testing equality with non-axis object (3 weeks ago) [Jonathan Terhorst] | * 65af65e - convert unit testing code to assertions (3 weeks ago) [Jonathan Terhorst] - | * 956fbab - Merge remote-tracking branch 'upstream/master' (3 weeks ago) [Jonathan Terhorst] - | |\ + | * 956fbab - Merge remote-tracking branch 'upstream/main' (3 weeks ago) [Jonathan Terhorst] + | |\ | |/ - -Thanks to Yury V. Zaytsev for posting it. - -.. include:: links.inc diff --git a/docs/src/developers_guide/gitwash/development_workflow.rst b/docs/src/developers_guide/gitwash/development_workflow.rst new file mode 100644 index 0000000000..8545a04308 --- /dev/null +++ b/docs/src/developers_guide/gitwash/development_workflow.rst @@ -0,0 +1,245 @@ +.. _development-workflow: + +#################### +Development Workflow +#################### + +You already have your own forked copy of the `iris`_ repository, by +following :ref:`forking`. You have :ref:`set-up-fork`. You have configured +git by following :ref:`configure-git`. Now you are ready for some real work. + +Workflow Summary +================ + +In what follows we'll refer to the upstream iris ``main`` branch, as +"trunk". + +* Don't use your ``main`` (that is on your fork) branch for development. +* When you are starting a new set of changes, fetch any changes from trunk, + and start a new *feature branch* from that. 
* Make a new branch for each separable set of changes |emdash| "one task, one
  branch".
* Name your branch for the purpose of the changes - e.g.
  ``bugfix-for-issue-14`` or ``refactor-database-code``.
* If you can possibly avoid it, avoid merging trunk or any other branches into
  your feature branch while you are working.
* If you do find yourself merging from trunk, consider :ref:`rebase-on-trunk`
* Ask on the Iris `GitHub Discussions`_ if you get stuck.
* Ask for code review!

This way of working helps to keep work well organized, with readable history.
This in turn makes it easier for project maintainers (that might be you) to see
what you've done, and why you did it.

See `linux git workflow`_ for some explanation.

.. _update-mirror-trunk:

Update the Mirror of Trunk
==========================

First make sure you have done :ref:`linking-to-upstream`.

From time to time you should fetch the upstream (trunk) changes from github::

    git fetch upstream

This will pull down any commits you don't have, and set the remote branches to
point to the right commit. For example, 'trunk' is the branch referred to by
(remote/branchname) ``upstream/main`` - and if there have been commits since
you last checked, ``upstream/main`` will change after you do the fetch.

.. _make-feature-branch:

Make a New Feature Branch
=========================

When you are ready to make some changes to the code, you should start a new
branch. Branches that are for a collection of related edits are often called
'feature branches'.

Making a new branch for each set of related changes will make it easier for
someone reviewing your branch to see what you are doing.

Choose an informative name for the branch to remind yourself and the rest of us
what the changes in the branch are for. For example ``add-ability-to-fly``, or
``bugfix-for-issue-42``.

::

    # Update the mirror of trunk
    git fetch upstream
    # Make new feature branch starting at current trunk
    git branch my-new-feature upstream/main
    git checkout my-new-feature

Generally, you will want to keep your feature branches on your public github_
fork of `iris`_. To do this, you ``git push`` this new branch up to your
github repo. Generally (if you followed the instructions in these pages, and by
default), git will have a link to your github repo, called ``origin``. You push
up to your own repo on github with::

    git push origin my-new-feature

In git >= 1.7 you can ensure that the link is correctly set by using the
``--set-upstream`` option::

    git push --set-upstream origin my-new-feature

From now on git will know that ``my-new-feature`` is related to the
``my-new-feature`` branch in the github repo.

.. _edit-flow:

The Editing Workflow
====================

Overview
--------

::

    # hack hack
    git add my_new_file
    git commit -am 'NF - some message'
    git push

In More Detail
--------------

#. Make some changes.
#. See which files have changed with ``git status``.
   You'll see a listing like this one::

       # On branch my-new-feature
       # Changed but not updated:
       #   (use "git add <file>..." to update what will be committed)
       #   (use "git checkout -- <file>..." to discard changes in working directory)
       #
       #    modified:   README
       #
       # Untracked files:
       #   (use "git add <file>..." to include in what will be committed)
       #
       #    INSTALL
       no changes added to commit (use "git add" and/or "git commit -a")

#. Check what the actual changes are with ``git diff``.
#.
Add any new files to version control with ``git add new_file_name``.
#. To commit all modified files into the local copy of your repo, do
   ``git commit -am 'A commit message'``. Note the ``-am`` options to
   ``commit``. The ``m`` flag just signals that you're going to type a
   message on the command line. The ``a`` flag will automatically stage
   all files that have been modified and deleted.
#. To push the changes up to your forked repo on github, do a ``git
   push``.


Testing Your Changes
====================

Once you are happy with your changes, work through the :ref:`pr_check` and
make sure your branch passes all the relevant tests.


Ask for Your Changes to be Reviewed or Merged
=============================================

When you are ready to ask for someone to review your code and consider a merge:

#. Go to the URL of your forked repo, say
   ``https://github.com/your-user-name/iris``.
#. Use the 'Switch Branches' dropdown menu near the top left of the page to
   select the branch with your changes:

   .. image:: branch_dropdown.png

#. Click on the 'Pull request' button:

   .. image:: pull_button.png

   Enter a title for the set of changes, and some explanation of what you've
   done. Say if there is anything you'd like particular attention for - like a
   complicated change or some code you are not happy with.

   If you don't think your request is ready to be merged, just say so in your
   pull request message. This is still a good way of getting some preliminary
   code review.

Some Other Things You Might Want to Do
======================================

Delete a Branch on Github
-------------------------

::

    git checkout main
    # delete branch locally
    git branch -D my-unwanted-branch
    # delete branch on github
    git push origin :my-unwanted-branch

Note the colon ``:`` before ``my-unwanted-branch``. See also:
https://github.com/guides/remove-a-remote-branch


Several People Sharing a Single Repository
------------------------------------------

If you want to work on some stuff with other people, where you are all
committing into the same repository, or even the same branch, then just
share it via github.

First fork iris into your account, as described in :ref:`forking`.

Then, go to your forked repository github page, say
``https://github.com/your-user-name/iris``, select :guilabel:`Settings`,
:guilabel:`Manage Access` and then :guilabel:`Invite collaborator`.

.. note:: For more information on sharing your repository see the
          GitHub documentation on `Inviting collaborators`_.


.. _Inviting collaborators: https://docs.github.com/en/github/setting-up-and-managing-your-github-user-account/inviting-collaborators-to-a-personal-repository

Now all those people can do::

    git clone git@github.com:your-user-name/iris.git

Remember that links starting with ``git@`` use the ssh protocol and are
read-write; links starting with ``git://`` are read-only.

Your collaborators can then commit directly into that repo with the
usual::

    git commit -am 'ENH - much better code'
    git push origin main # pushes directly into your repo

Explore Your Repository
-----------------------

To see a graphical representation of the repository branches and
commits::

    gitk --all

To see a linear list of commits for this branch::

    git log

Finally the :ref:`fancy-log` ``lg`` alias will give you a reasonable text-based
graph of the repository.


..
_rebase-on-trunk:

Rebasing on Trunk
-----------------

For more information, please see the
`official github documentation on git rebase`_.

.. _official github documentation on git rebase: https://docs.github.com/en/github/using-git/about-git-rebase

.. include:: links.inc
diff --git a/docs/src/developers_guide/gitwash/forking.rst b/docs/src/developers_guide/gitwash/forking.rst
new file mode 100644
index 0000000000..baeb243c86
--- /dev/null
+++ b/docs/src/developers_guide/gitwash/forking.rst
@@ -0,0 +1,36 @@
.. include:: links.inc

.. _forking:

===================================
Making Your Own Copy (fork) of Iris
===================================

You need to do this only once. The instructions here are very similar
to the instructions at https://help.github.com/forking/; please see
that page for more detail. We're repeating some of it here just to give the
specifics for the `Iris`_ project, and to suggest some default names.


Set up and Configure a Github Account
=====================================

If you don't have a github account, go to the github page and make one.

You then need to configure your account to allow write access; see
the `generating ssh keys for GitHub`_ help on `github help`_.


Create Your Own Forked Copy of Iris
===================================

#. Log into your github account.
#. Go to the `Iris`_ github home at `Iris github`_.
#. Click on the *fork* button:

   .. image:: forking_button.png

   Now, after a short pause, you should find yourself at the home page for
   your own forked copy of `Iris`_.


diff --git a/docs/src/developers_guide/gitwash/forking_button.png b/docs/src/developers_guide/gitwash/forking_button.png
new file mode 100644
index 0000000000..1eb21051d1
Binary files /dev/null and b/docs/src/developers_guide/gitwash/forking_button.png differ
diff --git a/docs/src/developers_guide/gitwash/git_intro.rst b/docs/src/developers_guide/gitwash/git_intro.rst
new file mode 100644
index 0000000000..dfb64da872
--- /dev/null
+++ b/docs/src/developers_guide/gitwash/git_intro.rst
@@ -0,0 +1,15 @@
.. include:: links.inc

Introduction
============

These pages describe a git_ and github_ workflow for the `Iris`_
project.

This is not a comprehensive git reference; it's just a workflow for our
own project. It's tailored to the github hosting service. You may well
find better or quicker ways of getting stuff done with git, but these
should get you started.

.. tip:: Please see the official `git documentation`_ for a complete list of
         git **commands** and **cheat sheets**.
\ No newline at end of file
diff --git a/docs/src/developers_guide/gitwash/git_links.inc b/docs/src/developers_guide/gitwash/git_links.inc
new file mode 100644
index 0000000000..3ced13703f
--- /dev/null
+++ b/docs/src/developers_guide/gitwash/git_links.inc
@@ -0,0 +1,18 @@
.. This (-*- rst -*-) format file contains commonly used link targets
   and name substitutions. It may be included in many files,
   therefore it should only contain link targets and name
   substitutions. Try grepping for "^\.\. _" to find plausible
   candidates for this list.

.. NOTE: reST targets are
   __not_case_sensitive__, so only one target definition is needed for
   nipy, NIPY, Nipy, etc...

.. _git: https://git-scm.com/
.. _github: https://github.com
.. _github help: https://help.github.com
.. _git documentation: https://git-scm.com/docs

.. _linux git workflow: https://www.mail-archive.com/dri-devel@lists.sourceforge.net/msg39091.html

..
|emdash| unicode:: U+02014
diff --git a/docs/src/developers_guide/gitwash/index.rst b/docs/src/developers_guide/gitwash/index.rst
new file mode 100644
index 0000000000..3cde622583
--- /dev/null
+++ b/docs/src/developers_guide/gitwash/index.rst
@@ -0,0 +1,14 @@
.. _using-git:

Working With Iris Source Code
=============================

.. toctree::
   :maxdepth: 2

   git_intro
   forking
   set_up_fork
   configure_git
   development_workflow

diff --git a/docs/src/developers_guide/gitwash/links.inc b/docs/src/developers_guide/gitwash/links.inc
new file mode 100644
index 0000000000..1d67f20d3a
--- /dev/null
+++ b/docs/src/developers_guide/gitwash/links.inc
@@ -0,0 +1,3 @@
.. include:: ../../common_links.inc

.. include:: git_links.inc
diff --git a/docs/src/developers_guide/gitwash/pull_button.png b/docs/src/developers_guide/gitwash/pull_button.png
new file mode 100644
index 0000000000..a088e19e95
Binary files /dev/null and b/docs/src/developers_guide/gitwash/pull_button.png differ
diff --git a/docs/src/developers_guide/gitwash/set_up_fork.rst b/docs/src/developers_guide/gitwash/set_up_fork.rst
new file mode 100644
index 0000000000..5318825488
--- /dev/null
+++ b/docs/src/developers_guide/gitwash/set_up_fork.rst
@@ -0,0 +1,70 @@
.. include:: links.inc

.. _set-up-fork:

================
Set up Your Fork
================

First you follow the instructions for :ref:`forking`.

Overview
========

::

    git clone git@github.com:your-user-name/iris.git
    cd iris
    git remote add upstream git://github.com/SciTools/iris.git

In Detail
=========

Clone Your Fork
---------------

#. Clone your fork to the local computer with ``git clone
   git@github.com:your-user-name/iris.git``
#. Change directory to your new repo: ``cd iris``. Then
   ``git branch -a`` to show you all branches. You'll get something
   like::

       * main
       remotes/origin/main

   This tells you that you are currently on the ``main`` branch, and
   that you also have a ``remote`` connection to ``origin/main``.
   What remote repository is ``remotes/origin``? Try ``git remote -v`` to
   see the URLs for the remote. They will point to your github fork.

   Now you want to connect to the upstream `Iris github`_ repository, so
   you can merge in changes from trunk.

.. _linking-to-upstream:

Linking Your Repository to the Upstream Repo
--------------------------------------------

::

    cd iris
    git remote add upstream git://github.com/SciTools/iris.git

``upstream`` here is just the arbitrary name we're using to refer to the
main `Iris`_ repository at `Iris github`_.

Note that we've used ``git://`` for the URL rather than ``git@``. The
``git://`` URL is read only. This means that we can't accidentally
(or deliberately) write to the upstream repo, and we are only going to
use it to merge into our own code.
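For example, a routine update of your ``main`` branch from trunk might then
look like this (a sketch, using the ``upstream`` remote configured above)::

    git fetch upstream
    git checkout main
    git merge upstream/main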
Just for your own satisfaction, show yourself that you now have a new
'remote', with ``git remote -v``, giving you something like::

    upstream   git://github.com/SciTools/iris.git (fetch)
    upstream   git://github.com/SciTools/iris.git (push)
    origin     git@github.com:your-user-name/iris.git (fetch)
    origin     git@github.com:your-user-name/iris.git (push)


diff --git a/docs/src/developers_guide/propose_changes.png b/docs/src/developers_guide/propose_changes.png
new file mode 100755
index 0000000000..d4e367ce1d
Binary files /dev/null and b/docs/src/developers_guide/propose_changes.png differ
diff --git a/docs/src/developers_guide/pull_request.png b/docs/src/developers_guide/pull_request.png
new file mode 100755
index 0000000000..bdc0698f54
Binary files /dev/null and b/docs/src/developers_guide/pull_request.png differ
diff --git a/docs/src/developers_guide/release.rst b/docs/src/developers_guide/release.rst
new file mode 100644
index 0000000000..c7ce230204
--- /dev/null
+++ b/docs/src/developers_guide/release.rst
@@ -0,0 +1,289 @@
.. include:: ../common_links.inc

.. _iris_development_releases:

Releases
========

A release of Iris is a `tag on the SciTools/Iris`_ Github repository.

Below is :ref:`iris_development_releases_steps`, followed by some prose on the
main areas that constitute the release.


.. _iris_development_releases_steps:

How to Create an Iris Release
-----------------------------

The step-by-step process is walked through by a script at
``/tools/release_do_nothing.py``, which is also available here:
:doc:`release_do_nothing`.


.. _release_manager:

Release Manager
---------------

A Release Manager will be nominated for each release of Iris. This role involves:

* deciding which features and bug fixes should be included in the release
* managing the `GitHub Projects`_ board for the release
* using :discussion:`GitHub Discussion releases category `
  for documenting intent and capturing any
  discussion about the release
* holding a developer retrospective post release, to look for potential
  future improvements

The Release Manager will make the release, ensuring that all the steps outlined
on this page are completed.


Versioning
----------

Iris' version numbers conform to `Semantic Versioning`_ (``MAJOR.MINOR.PATCH``)
and `PEP 440`_.

Iris uses `setuptools-scm`_ to automatically manage versioning based on Git
tags. No manual versioning work is required within the files themselves.


Release Candidate
-----------------

Prior to a release, a release candidate tag may be created, marked as a
pre-release in GitHub, with a tag ending in :literal:`rc` followed by a
number (0-based), e.g.:

    :literal:`v1.9.0rc0`

If created, the pre-release shall be available for a minimum of 2 weeks
prior to the release being cut. However, a 4 week period should be the goal,
to allow user groups to be notified of the existence of the pre-release and
encouraged to test the functionality.

A pre-release is expected for a major or minor release, but not for a
patch release.

If new features are required for a release after a release candidate has been
cut, a new pre-release shall be issued first.

Release candidates are made available as a conda package on the
`conda-forge Anaconda channel`_ using the `rc_iris`_ label. This is achieved via
the `conda-forge iris-feedstock`_ following `CFEP-05`_. For further information
see the `conda-forge User Documentation`_.
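For example, a release candidate could then be installed into a fresh conda
environment along the following lines (a sketch only; the
``conda-forge/label/rc_iris`` channel spelling follows the standard conda-forge
label convention)::

    conda create -n iris-rc -c conda-forge/label/rc_iris -c conda-forge iris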
Patch Releases
--------------

Patch releases may be implemented to fix problems with previous major or minor
releases. E.g. ``v1.9.1`` to fix a problem in ``v1.9.0``, both being part of
the ``v1.9`` series.

New features shall not be included in a patch release; these are for bug fixes.

A patch release does not require a release candidate, but the rest of the
release process is to be followed.


Before Release
--------------

Deprecations
~~~~~~~~~~~~

Any behaviour which has been deprecated for the correct number of
previous releases is now finally changed. More detail, including the correct
number of releases, is in :ref:`iris_development_deprecations`.

Standard Names
~~~~~~~~~~~~~~

The file ``etc/cf-standard-name-table.xml`` is updated from the
`latest CF standard names`_.
(This is used during the build to automatically generate the source file
``lib/iris/std_names.py``.)


The Release
-----------

Release Branch
~~~~~~~~~~~~~~

Once the features intended for the release are on ``main``, a release branch
should be created in the ``SciTools/iris`` repository. This will have the name:

    :literal:`v{major release number}.{minor release number}.x`

for example:

    :literal:`v1.9.x`

This branch shall be used to finalise the release details in preparation for
the release candidate.

Changes for a **patch release** should target the same release branch as the
rest of the series. For example, a fix
for a problem with the ``v1.9.0`` release will be merged into the ``v1.9.x``
release branch, and then released with the tag ``v1.9.1``.

Documentation
~~~~~~~~~~~~~

The documentation should include a dedicated What's New file for this release
series (e.g. ``v1.9.rst``), incorporating all of the What's New entries for the
release. This content should be reviewed and adapted as required, including
highlights at the top of the What's New document.

What's New entries for **patch releases** should be added to the existing file
for that release series (e.g. a ``v1.9.1`` section in the ``v1.9.rst`` file).

A template for What's New formatting can be found in the
``docs/src/whatsnew/latest.rst.template`` file.


Tagging
~~~~~~~

Once all checks are complete, the release is published from the release
branch - via the GitHub release functionality in the ``SciTools/iris``
repository - which simultaneously creates a Git tag for the release.


Post Release
------------

PyPI
~~~~
Iris is available on PyPI as ``scitools-iris``.

Iris' Continuous-Integration (CI) includes the automatic building and publishing
of PyPI artifacts in a dedicated GitHub Action.

Legacy manual instructions are appended to this page for reference purposes
(:ref:`update_pypi`).

conda-forge
~~~~~~~~~~~

Iris is available on conda-forge as ``iris``.

This is managed via the Iris conda recipe on the
`conda-forge iris-feedstock`_, which is updated after the release is cut on
GitHub, followed by automatic build and publish of the
conda package on the `conda-forge Anaconda channel`_.

Announcement
~~~~~~~~~~~~

Iris uses Twitter (`@scitools_iris`_) to announce new releases, as well as any
internal message boards that are accessible (e.g. at the UK Met Office).
Announcements usually include a highlighted feature to hook readers' attention.
Citation
~~~~~~~~

``docs/src/userguide/citation.rst`` is updated to include
the latest [non-release-candidate] version, date and `Zenodo DOI`_
of the new release. Ideally this would be updated before the release, but
the DOI for the new version is only available once the release has been
created in GitHub.

Merge Back
~~~~~~~~~~

After any release is published, **including patch releases**, the changes from
the release branch should be merged back onto the ``SciTools/iris`` ``main``
branch.


Appendices
----------

.. _update_pypi:

Updating PyPI Manually
~~~~~~~~~~~~~~~~~~~~~~

.. note::

    As part of our Continuous-Integration (CI), the building and publishing of
    PyPI artifacts is now automated by a dedicated GitHub Action.

    The following instructions **no longer** need to be performed manually,
    but remain part of the documentation for reference purposes only.

Update the `scitools-iris`_ project on PyPI with the latest Iris release.

To do this perform the following steps.

Create a conda environment with the appropriate conda packages to build the
source distribution (``sdist``) and pure Python wheel (``bdist_wheel``)::

    > conda create -n iris-pypi -c conda-forge --yes build twine
    > . activate iris-pypi

Checkout the appropriate Iris release tag from the appropriate remote.
For example, to checkout tag ``v1.0`` from ``upstream``::

    > git fetch upstream --tags
    > git checkout v1.0

Build the source distribution and wheel from the Iris root directory::

    > python -m build

The ``./dist`` directory should now be populated with the source archive
``.tar.gz`` file, and the built distribution ``.whl`` file.

Check that the package description will render properly on PyPI for each
of the built artifacts::

    > python -m twine check dist/*

To list and check the contents of the binary wheel::

    > python -m zipfile --list dist/*.whl

If all seems well, sufficient maintainer privileges will be required to
upload these artifacts to `scitools-iris`_ on PyPI::

    > python -m twine upload --repository-url https://upload.pypi.org/legacy/ dist/*

Ensure that the artifacts are successfully uploaded and available on
`scitools-iris`_ before creating a conda test environment to install Iris
from PyPI::

    > conda deactivate
    > conda env create --file ./requirements/iris.yml
    > . activate iris-dev
    > python -m pip install --no-deps scitools-iris

For further details on how to test Iris, see :ref:`developer_running_tests`.

.. seealso::

    For further information on packaging and uploading a project to PyPI, please
    refer to `Generating Distribution Archives`_ and `Packaging Your Project`_.

.. _SciTools/iris: https://github.com/SciTools/iris
.. _tag on the SciTools/Iris: https://github.com/SciTools/iris/releases
.. _conda-forge Anaconda channel: https://anaconda.org/conda-forge/iris
.. _conda-forge iris-feedstock: https://github.com/conda-forge/iris-feedstock
.. _CFEP-05: https://github.com/conda-forge/cfep/blob/master/cfep-05.md
.. _conda-forge User Documentation: https://conda-forge.org/docs/user/00_intro.html
.. _Active Versions: https://readthedocs.org/projects/scitools-iris/versions/
.. _Editing v3.0.0rc0: https://readthedocs.org/dashboard/scitools-iris/version/v3.0.0rc0/edit
.. _rc_iris: https://anaconda.org/conda-forge/iris/labels
.. _Generating Distribution Archives: https://packaging.python.org/tutorials/packaging-projects/#generating-distribution-archives
..
_Packaging Your Project: https://packaging.python.org/guides/distributing-packages-using-setuptools/#packaging-your-project
.. _latest CF standard names: https://cfconventions.org/Data/cf-standard-names/current/src/cf-standard-name-table.xml
.. _setuptools-scm: https://github.com/pypa/setuptools_scm
.. _Semantic Versioning: https://semver.org/
.. _PEP 440: https://peps.python.org/pep-0440/
.. _@scitools_iris: https://twitter.com/scitools_iris
.. _GitHub Projects: https://github.com/SciTools/iris/projects
.. _Zenodo DOI: https://doi.org/10.5281/zenodo.595182
diff --git a/docs/src/developers_guide/release_do_nothing.rst b/docs/src/developers_guide/release_do_nothing.rst
new file mode 100644
index 0000000000..1f72827184
--- /dev/null
+++ b/docs/src/developers_guide/release_do_nothing.rst
@@ -0,0 +1,12 @@
:orphan:

Release Do-Nothing Script
-------------------------

Rendered from the original ``/tools/release_do_nothing.py``.

`Read more about do-nothing scripts
`_

.. literalinclude:: ../../../tools/release_do_nothing.py
   :language: python
diff --git a/docs/src/developers_guide/testing_tools.rst b/docs/src/developers_guide/testing_tools.rst
new file mode 100755
index 0000000000..dd628d37fc
--- /dev/null
+++ b/docs/src/developers_guide/testing_tools.rst
@@ -0,0 +1,80 @@
.. include:: ../common_links.inc

.. _testing_tools:

Testing Tools
*************

Iris has various internal convenience functions and utilities available to
support writing tests. Using these makes tests quicker and easier to write, and
also consistent with the rest of Iris (which makes it easier to work with the
code). Most of these conveniences are accessed through the
:class:`iris.tests.IrisTest` class, from
which Iris' test classes then inherit.

.. tip::

    All functions listed on this page are defined within
    :mod:`iris.tests.__init__.py` as methods of
    :class:`iris.tests.IrisTest_nometa` (which :class:`iris.tests.IrisTest`
    inherits from). They can be accessed within a test using
    ``self.exampleFunction``.

Custom assertions
=================

:class:`iris.tests.IrisTest` supports a variety of custom unittest-style
assertions, such as :meth:`~iris.tests.IrisTest_nometa.assertArrayEqual` and
:meth:`~iris.tests.IrisTest_nometa.assertArrayAlmostEqual`.

.. _create-missing:

Saving results
--------------

Some tests compare the generated output to the expected result contained in a
file. Custom assertions for this include
:meth:`~iris.tests.IrisTest_nometa.assertCMLApproxData`,
:meth:`~iris.tests.IrisTest_nometa.assertCDL`,
:meth:`~iris.tests.IrisTest_nometa.assertCML` and
:meth:`~iris.tests.IrisTest_nometa.assertTextFile`. See the docstrings for more
information.

.. note::

    Sometimes code changes alter the results expected from a test containing the
    above methods. These can be updated by removing the existing result files
    and then running the file containing the test with a ``--create-missing``
    command line argument, or setting the ``IRIS_TEST_CREATE_MISSING``
    environment variable to anything non-zero. This will create the files rather
    than erroring, allowing you to commit the updated results.

Context managers
================

Capturing exceptions and logging
--------------------------------

:class:`iris.tests.IrisTest` includes several context managers that can be used
to make test code tidier and easier to read. These include
:meth:`~iris.tests.IrisTest_nometa.assertWarnsRegexp` and
:meth:`~iris.tests.IrisTest_nometa.assertLogs`.
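For illustration, a hypothetical test case using these conveniences might look
like the following sketch (``produce_warning`` is an invented stand-in for real
code under test):

.. code-block:: python

    import warnings

    import numpy as np

    import iris.tests as tests


    def produce_warning():
        # Hypothetical code under test.
        warnings.warn("this feature is deprecated")


    class TestExample(tests.IrisTest):
        def test_arrays_equal(self):
            result = np.arange(3)
            self.assertArrayEqual(result, np.array([0, 1, 2]))

        def test_warning_raised(self):
            # assertWarnsRegexp checks that a matching warning is raised.
            with self.assertWarnsRegexp("deprecated"):
                produce_warning()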
Temporary files
---------------

It's also possible to generate temporary files in a concise fashion with
:meth:`~iris.tests.IrisTest_nometa.temp_filename`.

Patching
========

:meth:`~iris.tests.IrisTest_nometa.patch` is a wrapper around
``unittest.mock.patch`` that will be automatically cleaned up at the end of the
test.

Graphic tests
=============

As a package capable of generating graphical outputs, Iris has utilities for
creating and updating graphical tests - see :ref:`testing.graphics` for more
information.
diff --git a/docs/src/further_topics/dask_best_practices/dask_bags_and_greed.rst b/docs/src/further_topics/dask_best_practices/dask_bags_and_greed.rst
new file mode 100644
index 0000000000..272ea6fc08
--- /dev/null
+++ b/docs/src/further_topics/dask_best_practices/dask_bags_and_greed.rst
@@ -0,0 +1,235 @@
.. _examples_bags_greed:

3. Dask Bags and Greedy Parallelism
-----------------------------------

Here is a journey that demonstrates:

* How to apply dask.bags to an existing script
* The equal importance of optimisation of non-parallel parts of a script
* Protection against multiple software trying to manage parallelism
  simultaneously


3.1 The Problem - Slow Loading
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
We have ~7000 GRIB files spread across 256 dated directories::

    .
    |-- 20180401
    |   |-- gfs.t00z.icing.0p25.grb2f006
    |   |-- gfs.t00z.icing.0p25.grb2f006.1
    |   |-- gfs.t00z.icing.0p25.grb2f012
    |   |-- gfs.t00z.icing.0p25.grb2f018
    |   |-- gfs.t00z.icing.0p25.grb2f024
    |   |-- gfs.t00z.icing.0p25.grb2f030
    |   `-- gfs.t00z.icing.0p25.grb2f036
    |-- 20180402
    |   `-- gfs.t00z.icing.0p25.grb2f006
    |-- 20180403
    |   |-- gfs.t12z.icing.0p25.grb2f006
    |   |-- gfs.t12z.icing.0p25.grb2f012

With this script, a sample of 11 GRIB files takes ~600 secs to load::

    import glob

    import iris

    def callback(cube, field, fname):
        if field.sections[5]['bitsPerValue'] == 0:
            raise iris.exceptions.IgnoreCubeException
        if field.sections[4]['parameterNumber'] == 20:
            raise iris.exceptions.IgnoreCubeException
        elif field.sections[4]['parameterNumber'] == 234:
            cube.long_name = 'Icing Severity'

    fpaths = glob.glob('20190416/*t18z*f???')
    cubes = iris.load(fpaths, callback=callback)

3.2 Parallelisation
^^^^^^^^^^^^^^^^^^^
We'll try using `dask.bag `_ to
parallelise the function calls. It's important that Dask is given the freedom
to break the task down in an efficient manner - the function that is mapped
across the bag should only load a single file, and the bag itself can
iterate through the list of files. Here's the restructured script::

    import glob
    import multiprocessing
    import os

    import dask
    import dask.bag as db
    import iris

    def callback(cube, field, fname):
        if field.sections[5]['bitsPerValue'] == 0:
            raise iris.exceptions.IgnoreCubeException
        if field.sections[4]['parameterNumber'] == 20:
            raise iris.exceptions.IgnoreCubeException
        elif field.sections[4]['parameterNumber'] == 234:
            cube.long_name = 'Icing Severity'

    def func(fname):
        return iris.load_cube(fname, callback=callback)

    fpaths = list(glob.glob('20190416/*t18z*f???'))

    # Determine the number of processors visible ..
    cpu_count = multiprocessing.cpu_count()

    # .. or as given by slurm allocation.
    # Only relevant when using Slurm for job scheduling
    if 'SLURM_NTASKS' in os.environ:
        cpu_count = int(os.environ['SLURM_NTASKS'])

    # Do not exceed the number of CPUs available, leaving 1 for the system.
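    # (This num_workers value is passed to dask.config.set below, so the
    # parallel load uses at most that many workers.)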
    num_workers = cpu_count - 1
    print('Using {} workers from {} CPUs...'.format(num_workers, cpu_count))

    # Now do the parallel load.
    with dask.config.set(num_workers=num_workers):
        bag = db.from_sequence(fpaths).map(func)
        cubes = iris.cube.CubeList(bag.compute()).merge()

This achieves approximately a 10-fold improvement if enough CPUs are
available to have one per file. See this benchmarking:

+---------------+-----------------------+---------------+---------------+
| Machine       | CPUs Available        | CPUs Used     | Time Taken    |
+===============+=======================+===============+===============+
| A             | 4                     | 3             | 4m 05s        |
|               |                       +---------------+---------------+
|               |                       | 4             | 3m 22s        |
+---------------+-----------------------+---------------+---------------+
| B             | 8                     | 1             | 9m 10s        |
|               |                       +---------------+---------------+
|               |                       | 7             | 2m 35s        |
|               |                       +---------------+---------------+
|               |                       | 8             | 2m 20s        |
+---------------+-----------------------+---------------+---------------+


.. _examples_bags_greed_profile:

3.3 Profiling
^^^^^^^^^^^^^
1m 10s is still a surprisingly long time. When faced with a mystery like
this, it is helpful to profile the script to see if there are any steps that
are taking more time than we would expect. For this we use a tool called
`kapture `_ to produce a
flame chart visualising the time spent performing each call:

.. image:: images/grib-bottleneck.png
   :width: 1000
   :align: center

From this we see that 96% of the runtime is taken by this call::

    res = gribapi.grib_get_array(self._message_id, key)

This call is made by the ``callback`` function when it uses
GRIB messages to filter out cubes with certain unwanted properties.

3.4 Improving GRIB Key Handling
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Even with parallelisation, we are still limited by the time it takes to run
a single instance of a function. This is going to become much more important
when running 7000 files instead of 11, since there will be nowhere near enough
CPUs even on a large multi-processing system, meaning each CPU will be running
many instances of the function. **Parallelisation can only go so far towards
solving speed issues** -- it's effectively the 'brute force' method.

:ref:`examples_bags_greed_profile` showed us where the major bottleneck is. To
improve efficiency we can re-write the script to filter on GRIB messages
*before* converting the GRIB file to a cube::

    import dask
    import dask.bag as db
    import glob
    import iris
    import multiprocessing
    import os

    def func(fname):
        import iris
        from iris_grib import load_pairs_from_fields
        from iris_grib.message import GribMessage

        # perform GRIB message level filtering...
        filtered_messages = []
        for message in GribMessage.messages_from_filename(fname):
            if (message.sections[5]['bitsPerValue'] != 0 and
                    message.sections[4]['parameterNumber'] == 234):
                filtered_messages.append(message)

        # now convert the messages to cubes...
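        # (load_pairs_from_fields converts the filtered messages, yielding
        # (cube, message) pairs so each cube stays paired with its source
        # GRIB message.)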
        cubes = [cube for cube, message in load_pairs_from_fields(filtered_messages)]
        return iris.cube.CubeList(cubes).merge_cube()

    fpaths = list(glob.glob('/scratch/frcz/ICING/GFS_DATA/20190416/*t18z*f???'))
    cpu_count = multiprocessing.cpu_count()

    # Only relevant when using Slurm for job scheduling
    if 'SLURM_NTASKS' in os.environ:
        cpu_count = int(os.environ['SLURM_NTASKS'])

    num_workers = cpu_count - 1

    print('Using {} workers from {} CPUs...'.format(num_workers, cpu_count))
    with dask.config.set(num_workers=num_workers):
        bag = db.from_sequence(fpaths).map(func)
        cubes = iris.cube.CubeList(bag.compute())

This achieves a significant performance improvement - more than twice as
fast as the previous benchmarks:

+---------------+-----------------------+---------------+---------------+-----------+
| Machine       | CPUs Available        | CPUs Used     | Previous Time | New Time  |
+===============+=======================+===============+===============+===========+
| Example       | 8                     | 7             | 2m 35s        | 1m 05s    |
|               |                       +---------------+---------------+-----------+
|               |                       | 8             | 2m 20s        | 1m 03s    |
+---------------+-----------------------+---------------+---------------+-----------+

3.5 Managing External Factors
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The speed will still need to be further improved before we can process 7000
files. The main gains we can achieve are by making sure it is **only Dask**
that manages multi-processing - if multi-processing is coming from more
than one place there are predictable clashes.

First, NumPy must be prevented from performing its own multi-processing, by
adding the following **before** ``import numpy`` is called. You can read more
about this in :ref:`numpy_threads`.

::

    import os

    os.environ["OMP_NUM_THREADS"] = "1"
    os.environ["OPENBLAS_NUM_THREADS"] = "1"
    os.environ["MKL_NUM_THREADS"] = "1"
    os.environ["VECLIB_MAXIMUM_THREADS"] = "1"
    os.environ["NUMEXPR_NUM_THREADS"] = "1"

Lastly, if you are using SLURM on a computing cluster, then SLURM must be
configured to prevent it from optimising the number of cores necessary for the
job. See the SLURM commands below, to be added before running the python
script. It's important that ``ntasks`` matches the number of CPUs specified in
the python script. You can read more about these points in
:ref:`multi-pro_slurm`.

::

    #SBATCH --ntasks=12
    #SBATCH --ntasks-per-core=1

This has all been based on a real example. Once all the above had been set
up correctly, the completion time had dropped from an estimated **55 days**
to **less than 1 day**.

3.6 Lessons
^^^^^^^^^^^
* Dask isn't a magic switch - it's important to write your script so that
  there is a way to create small sub-tasks. In this case by providing
  dask.bag with the file list and the function separated
* Parallelism is not the only performance improvement to try - the script
  will still be slow if the individual function is slow
* All multi-processing needs to be managed by Dask. Several other factors
  may introduce multi-processing and these need to be configured not to do so
diff --git a/docs/src/further_topics/dask_best_practices/dask_parallel_loop.rst b/docs/src/further_topics/dask_best_practices/dask_parallel_loop.rst
new file mode 100644
index 0000000000..836503314c
--- /dev/null
+++ b/docs/src/further_topics/dask_best_practices/dask_parallel_loop.rst
@@ -0,0 +1,169 @@
.. _examples_parallel_loop:

2.
Parallelising a Loop of Multiple Calls to a Third Party Library
------------------------------------------------------------------

Whilst Iris does provide extensive functionality for performing statistical and
mathematical operations on your data, it is sometimes necessary to use a third
party library.

The following example describes a real world use case of how to parallelise
multiple calls to a third party library using dask bags.

2.1 The Problem - Parallelising a Loop
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
In this particular example, the user is calculating a sounding parcel for each
column in their dataset. The cubes that are used are of shape::

    (model_level_number: 20; grid_latitude: 1536; grid_longitude: 1536)

As a sounding is calculated for each column, this means there are 1536x1536
individual calculations.

In Python, it is common practice to vectorise the calculation of for loops.
Vectorising is done by using NumPy to operate on the whole array at once rather
than a single element at a time. Unfortunately, not all operations are
vectorisable, including the calculation in this example, and so we look to
other methods to improve the performance.

2.2 Original Code with Loop
^^^^^^^^^^^^^^^^^^^^^^^^^^^
We start out by loading cubes of pressure, temperature, dewpoint temperature
and height::

    import iris
    import numpy as np
    from skewt import SkewT as sk

    pressure = iris.load_cube('a.press.19981109.pp')
    temperature = iris.load_cube('a.temp.19981109.pp')
    dewpoint = iris.load_cube('a.dewp.19981109.pp')
    height = iris.load_cube('a.height.19981109.pp')

    # The number of points along each horizontal axis.
    nlim = pressure.shape[1]

We set up the NumPy arrays we will be filling with the output data::

    output_arrays = [np.zeros(pressure.shape[1:]) for _ in range(6)]
    cape, cin, lcl, lfc, el, tpw = output_arrays

Now we loop over the columns in the data to calculate the soundings::

    for y in range(nlim):
        for x in range(nlim):
            mydata = {'pres': pressure[:, y, x],
                      'temp': temperature[:, y, x],
                      'dwpt': dewpoint[:, y, x],
                      'hght': height[:, y, x]}

            # Calculate the sounding with the selected column of data.
            S = sk.Sounding(soundingdata=mydata)
            try:
                startp, startt, startdp, type_ = S.get_parcel(parcel_def)
                P_lcl, P_lfc, P_el, CAPE, CIN = S.get_cape(
                    startp, startt, startdp, totalcape='tot')
                TPW = S.precipitable_water()
            except:
                P_lcl, P_lfc, P_el, CAPE, CIN, TPW = [
                    np.ma.masked for _ in range(6)]

            # Fill the output arrays with the results
            cape[y, x] = CAPE
            cin[y, x] = CIN
            lcl[y, x] = P_lcl
            lfc[y, x] = P_lfc
            el[y, x] = P_el
            tpw[y, x] = TPW

2.3 Profiling the Code with Kapture
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Kapture is a useful statistical profiler. For more information see `the
Kapture repo `_.

Results below:

.. image:: images/loop_third_party_kapture_results.png
   :width: 1000
   :align: center

As we can see above (looking at the highlighted section of the red bar), most
of the time is spent in the call to::

    S.get_parcel(parcel_def)

As there are over two million columns in the data, we would greatly benefit
from parallelising this work.

2.4 Parallelising with Dask Bags
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Dask bags are collections of Python objects that you can map a computation over
in a parallel manner.

For more information about dask bags, see the `Dask Bag Documentation
`_.

Dask bags work best with lightweight objects, so we will create a collection of
indices into our data arrays.
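As a toy illustration of the pattern (an aside, not part of the user's
script), a bag maps a function over the items of a sequence in parallel::

    import dask.bag as db

    # Map a simple function over a small bag, split across 4 partitions.
    bag = db.from_sequence(range(8), npartitions=4)
    print(bag.map(lambda x: x * x).compute())
    # -> [0, 1, 4, 9, 16, 25, 36, 49]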
+
+First, we put the loop into a function that takes a slice object to index the
+appropriate band of the array::
+
+    def calculate_sounding(y_slice):
+        for y in range(y_slice.start, y_slice.stop):
+            for x in range(nlim):
+                mydata = {'pres': pressure[:, y, x],
+                          'temp': temperature[:, y, x],
+                          'dwpt': dewpoint[:, y, x],
+                          'hght': height[:, y, x]}
+
+                # Calculate the sounding with the selected column of data.
+                S = sk.Sounding(soundingdata=mydata)
+                try:
+                    startp, startt, startdp, type_ = S.get_parcel(parcel_def)
+                    P_lcl, P_lfc, P_el, CAPE, CIN = S.get_cape(
+                        startp, startt, startdp, totalcape=total_cape)
+                    TPW = S.precipitable_water()
+                except Exception:
+                    P_lcl, P_lfc, P_el, CAPE, CIN, TPW = [
+                        np.ma.masked for _ in range(6)]
+
+                # Fill the output arrays with the results.
+                cape[y, x] = CAPE
+                cin[y, x] = CIN
+                lcl[y, x] = P_lcl
+                lfc[y, x] = P_lfc
+                el[y, x] = P_el
+                tpw[y, x] = TPW
+
+Then we create a dask bag of slice objects that will create multiple partitions
+along the y axis::
+
+    import dask
+    import dask.bag as db
+
+    num_of_workers = 4
+    len_of_y_axis = pressure.shape[1]
+
+    part_loc = [int(loc) for loc in np.floor(np.linspace(0, len_of_y_axis,
+                                                         num_of_workers + 1))]
+
+    dask_bag = db.from_sequence(
+        [slice(part_loc[i], part_loc[i+1]) for i in range(num_of_workers)])
+
+    with dask.config.set(scheduler='processes'):
+        dask_bag.map(calculate_sounding).compute()
+
+When this was run on a machine with 4 workers, a speedup of ~4x was achieved,
+as expected.
+
+Note that if using the processes scheduler there is some extra time spent
+serialising the data to pass it between workers. For more information on the
+different schedulers available in Dask, see the `Dask Scheduler Overview`_.
+
+For further speed up, it is possible to run the same code on a multi-processing
+system where you will have access to more CPUs.
+
+In this particular example, we are handling multiple NumPy arrays and so we use
+dask bags. If working with a single NumPy array, it may be more appropriate to
+use Dask Arrays (see `Dask Arrays`_ for more information).
+
+
+2.5 Lessons
+^^^^^^^^^^^
+* If possible, dask bags should contain lightweight objects
+* Minimise the number of tasks that are created
diff --git a/docs/src/further_topics/dask_best_practices/dask_pp_to_netcdf.rst b/docs/src/further_topics/dask_best_practices/dask_pp_to_netcdf.rst
new file mode 100644
index 0000000000..28784154b4
--- /dev/null
+++ b/docs/src/further_topics/dask_best_practices/dask_pp_to_netcdf.rst
@@ -0,0 +1,92 @@
+.. _examples_pp_to_ff:
+
+1. Speed up Converting PP Files to NetCDF
+-----------------------------------------
+
+Here is an example of how dask objects can be tuned for better performance.
+
+1.1 The Problem - Slow Saving
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+We have ~300 PP files which we load as follows:
+
+.. code-block:: python
+
+    import iris
+    import glob
+
+    files = glob.glob("pp_files/*.pp")
+    cube = iris.load_cube(files, "mass_fraction_of_ozone_in_air")
+
+Note that loading here may also be parallelised in a similar manner as
+described in :ref:`examples_bags_greed`. Either way, the resulting cube looks
+as follows:
+
+.. code-block:: text
+
+    mass_fraction_of_ozone_in_air / (kg kg-1) (time: 276; model_level_number: 85; latitude: 144; longitude: 192)
+        Dimension coordinates:
+            time                           x                      -             -               -
+            model_level_number             -                      x             -               -
+            latitude                       -                      -             x               -
+            longitude                      -                      -             -               x
+        Auxiliary coordinates:
+            forecast_period                x                      -             -               -
+            level_height                   -                      x             -               -
+            sigma                          -                      x             -               -
+        Scalar coordinates:
+            forecast_reference_time: 1850-01-01 00:00:00
+        Attributes:
+            STASH: m01s34i001
+            source: Data from Met Office Unified Model
+            um_version: 10.9
+        Cell methods:
+            mean: time (1 hour)
+
+The cube is then immediately saved as a netCDF file.
+
+.. code-block:: python
+
+    nc_chunks = [chunk[0] for chunk in cube.lazy_data().chunks]
+    iris.save(cube, "outfile.nc", chunksizes=nc_chunks)
+
+This operation was taking longer than expected and we would like to improve
+performance. Note that when this cube is saved, its data is still lazy: the
+data is both read and written at the saving step, and is done so in chunks.
+The way this data is divided into chunks can affect performance. By tweaking
+the way these chunks are structured it may be possible to improve performance
+when saving.
+
+
+.. _dask_rechunking:
+
+1.2 Rechunking
+^^^^^^^^^^^^^^
+We may inspect the cube's lazy data before saving:
+
+.. code-block:: python
+
+    # We can access the cube's Dask array.
+    lazy_data = cube.lazy_data()
+    # We can find the shape of the chunks.
+    # Note that the chunksize of a Dask array is the shape of the chunk
+    # as a tuple.
+    print(lazy_data.chunksize)
+
+Doing so, we find that the chunks currently have the shape::
+
+    (1, 1, 144, 192)
+
+This is significantly smaller than the `size which Dask recommends`_. Bear in
+mind that the ideal chunk size depends on the platform you are running on (for
+this example, the code is being run on a desktop with 8 CPUs). In this case, we
+have 23460 small chunks. We can reduce the number of chunks by rechunking
+before saving:
+
+.. code-block:: python
+
+    lazy_data = cube.lazy_data()
+    # Note that Dask's rechunk takes the new chunk shape as a single
+    # tuple argument.
+    lazy_data = lazy_data.rechunk((1, 85, 144, 192))
+    cube.data = lazy_data
+
+We now have 276 moderately sized chunks. When we try saving again, we find
+that it is approximately 4 times faster, saving in 2m13s rather than 10m33s.
diff --git a/docs/src/further_topics/dask_best_practices/images/grib-bottleneck.png b/docs/src/further_topics/dask_best_practices/images/grib-bottleneck.png
new file mode 100644
index 0000000000..c029d57e5e
Binary files /dev/null and b/docs/src/further_topics/dask_best_practices/images/grib-bottleneck.png differ
diff --git a/docs/src/further_topics/dask_best_practices/images/loop_third_party_kapture_results.png b/docs/src/further_topics/dask_best_practices/images/loop_third_party_kapture_results.png
new file mode 100644
index 0000000000..8f388bb89c
Binary files /dev/null and b/docs/src/further_topics/dask_best_practices/images/loop_third_party_kapture_results.png differ
diff --git a/docs/src/further_topics/dask_best_practices/index.rst b/docs/src/further_topics/dask_best_practices/index.rst
new file mode 100644
index 0000000000..f126427d3f
--- /dev/null
+++ b/docs/src/further_topics/dask_best_practices/index.rst
@@ -0,0 +1,221 @@
+.. _dask best practices:
+
+Dask Best Practices
+*******************
+
+This section outlines some of the best practices when using Dask with Iris. These
+practices involve improving performance through rechunking, making the best use of
+computing clusters and avoiding parallelisation conflicts between Dask and NumPy.
+
+.. note::
+
+    Here, we have collated advice and a handful of examples, from the topics most
+    relevant when using Dask with Iris, that we hope will assist users to make
+    the best start when using Dask. It is *not* a fully comprehensive guide
+    encompassing all best practices. You can find more general dask information in the
+    `official Dask Documentation`_.
+
+
+Introduction
+============
+
+`Dask`_ is a powerful tool for speeding up data handling
+via lazy loading and parallel processing. To get the full benefit of using
+Dask, it is important to configure it correctly and supply it with
+appropriately structured data. For example, we may need to "chunk" data arrays
+into smaller pieces to process, read and write it; getting the "chunking" right
+can make a significant difference to performance!
+
+
+.. _numpy_threads:
+
+NumPy Threads
+=============
+
+In certain scenarios NumPy will attempt to perform threading using an
+external library - typically OMP, MKL or openBLAS - making use of **every**
+CPU available. This interacts badly with Dask:
+
+* Dask may create multiple instances of NumPy, each generating enough
+  threads to use **all** the available CPUs. The resulting sharing of CPUs
+  between threads greatly reduces performance. The more cores there are, the
+  more pronounced this problem is.
+* NumPy will generate enough threads to use all available CPUs even
+  if Dask is deliberately configured to only use a subset of CPUs. The
+  resulting sharing of CPUs between threads greatly reduces performance.
+* `Dask is already designed to parallelise with NumPy arrays`_, so adding
+  NumPy's 'competing' layer of parallelisation could cause unpredictable
+  performance.
+
+Therefore it is best to prevent NumPy performing its own parallelisation, `a
+suggestion made in Dask's own documentation`_.
+The following commands will ensure this in all scenarios:
+
+in Python...
+
+::
+
+    # Must be run before importing NumPy.
+    import os
+    os.environ["OMP_NUM_THREADS"] = "1"
+    os.environ["OPENBLAS_NUM_THREADS"] = "1"
+    os.environ["MKL_NUM_THREADS"] = "1"
+    os.environ["VECLIB_MAXIMUM_THREADS"] = "1"
+    os.environ["NUMEXPR_NUM_THREADS"] = "1"
+
+or in Linux command line...
+
+::
+
+    export OMP_NUM_THREADS=1
+    export OPENBLAS_NUM_THREADS=1
+    export MKL_NUM_THREADS=1
+    export VECLIB_MAXIMUM_THREADS=1
+    export NUMEXPR_NUM_THREADS=1
+
+
+.. _multi-pro_systems:
+
+Dask on Computing Clusters
+==========================
+
+Dask is well suited for use on computing clusters, but there are some important factors you must be
+aware of. In particular, you will always need to explicitly control parallel
+operation, both in Dask and likewise in NumPy.
+
+
+.. _multi-pro_slurm:
+
+CPU Allocation
+--------------
+
+When running on a computing cluster, unless configured otherwise, Dask will attempt to create
+one parallel 'worker' task for each CPU. However, when using a job scheduler such as Slurm, only *some* of
+these CPUs are actually accessible -- often, and by default, only one. This leads to a serious
+over-commitment unless it is controlled.
+
+So, **whenever Iris is used on a computing cluster, you must always control the number
+of dask workers to a sensible value**, matching the Slurm allocation. You do
+this with::
+
+    dask.config.set(num_workers=N)
+
+For an example, see :doc:`dask_bags_and_greed`.
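+
+As a minimal sketch of this (assuming a Slurm environment where the
+``SLURM_NTASKS`` environment variable advertises your CPU allocation, as in
+that example)::
+
+    import os
+
+    import dask
+
+    # Fall back to a single worker when no allocation is advertised.
+    dask.config.set(num_workers=int(os.environ.get("SLURM_NTASKS", "1")))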
+
+Alternatively, when there is only one CPU allocated, it may actually be more
+efficient to use a "synchronous" scheduler instead, with::
+
+    dask.config.set(scheduler='synchronous')
+
+See the Dask documentation on the `Single thread synchronous scheduler`_.
+
+
+.. _multi-pro_numpy:
+
+NumPy Threading
+---------------
+
+NumPy also interrogates the visible number of CPUs to multi-thread its operations.
+The large number of CPUs available in a computing cluster will thus cause confusion if NumPy
+attempts its own parallelisation, so this must be prevented. Refer back to
+:ref:`numpy_threads` for more detail.
+
+
+Distributed
+-----------
+
+Even though allocations on a computing cluster are generally restricted to a single node, there
+are still good reasons for using ``dask.distributed`` in many cases. See `Single Machine: dask.distributed`_
+in the Dask documentation.
+
+
+Chunking
+========
+
+Dask breaks down large data arrays into chunks, allowing efficient
+parallelisation by processing several smaller chunks simultaneously. For more
+information, see the documentation on `Dask Array`_.
+
+Iris provides a basic chunking shape to Dask, attempting to set the shape for
+best performance. The chunking that is used can depend on the file format that
+is being loaded. See below for how chunking is performed for:
+
+* :ref:`chunking_netcdf`
+* :ref:`chunking_pp_ff`
+
+It can in some cases be beneficial to re-chunk the arrays in Iris cubes.
+For information on how to do this, see :ref:`dask_rechunking`.
+
+
+.. _chunking_netcdf:
+
+NetCDF Files
+------------
+
+NetCDF files can include their own chunking specification. This is either
+specified when creating the file, or is automatically assigned if one or
+more of the dimensions is `unlimited`_.
+Importantly, netCDF chunk shapes are **not optimised for Dask
+performance**.
+
+Chunking can be set independently for any variable in a netCDF file.
+When a netCDF variable uses an unlimited dimension, it is automatically
+chunked: the chunking is the shape of the whole variable, but with '1' instead
+of the length in any unlimited dimensions.
+
+When chunking is specified for netCDF data, Iris will set the dask chunking
+to an integer multiple or fraction of that shape, such that the data size is
+near to but not exceeding the dask array chunk size.
+
+
+.. _chunking_pp_ff:
+
+PP and Fieldsfiles
+------------------
+
+PP and Fieldsfiles contain multiple 2D fields of data. When loading PP or
+Fieldsfiles into Iris cubes, the chunking will automatically be set to a chunk
+per field.
+
+For example, if a PP file contains 2D lat-lon fields for each of the
+85 model level numbers, it will load in a cube that looks as follows::
+
+    (model_level_number: 85; latitude: 144; longitude: 192)
+
+The data in this cube will be partitioned with chunks of shape
+:code:`(1, 144, 192)`.
+
+If the file(s) being loaded contain multiple fields, this can lead to an
+excessive number of chunks, which will result in poor performance.
+
+When the default chunking is not appropriate, it is possible to rechunk.
+:doc:`dask_pp_to_netcdf` provides a detailed demonstration of how Dask can optimise
+that process.
+
+
+Examples
+========
+
+We have written some examples of Dask use cases, which come with advice and
+explanations of why and how the tasks are performed the way they are.
+
+If you have an example of a Dask best practice that you think may be helpful
+to others, please share it with us by raising a new `discussion on the Iris repository`_.
+ +* :doc:`dask_pp_to_netcdf` +* :doc:`dask_parallel_loop` +* :doc:`dask_bags_and_greed` + +.. toctree:: + :hidden: + :maxdepth: 1 + + dask_pp_to_netcdf + dask_parallel_loop + dask_bags_and_greed diff --git a/docs/src/further_topics/filtering_warnings.rst b/docs/src/further_topics/filtering_warnings.rst new file mode 100644 index 0000000000..ef8701f951 --- /dev/null +++ b/docs/src/further_topics/filtering_warnings.rst @@ -0,0 +1,271 @@ +.. _filtering-warnings: + +================== +Filtering Warnings +================== + +Since Iris cannot predict your specific needs, it by default raises Warnings +for anything that might be a problem for **any** user, and is designed to work with +you to ``ignore`` Warnings which you do not find helpful. + +.. testsetup:: filtering_warnings + + from pathlib import Path + import sys + import warnings + + import iris + import iris.coord_systems + import iris.warnings + + # Hack to ensure doctests actually see Warnings that are raised, and that + # they have a relative path (so a test pass is not machine-dependent). + warnings.filterwarnings("default") + IRIS_FILE = Path(iris.__file__) + def custom_warn(message, category, filename, lineno, file=None, line=None): + filepath = Path(filename) + filename = str(filepath.relative_to(IRIS_FILE.parents[1])) + sys.stdout.write(warnings.formatwarning(message, category, filename, lineno)) + warnings.showwarning = custom_warn + + geog_cs_globe = iris.coord_systems.GeogCS(6400000) + orthographic_coord_system = iris.coord_systems.Orthographic( + longitude_of_projection_origin=0, + latitude_of_projection_origin=0, + ellipsoid=geog_cs_globe, + ) + + + def my_operation(): + geog_cs_globe.inverse_flattening = 0.1 + _ = orthographic_coord_system.as_cartopy_crs() + +Here is a hypothetical operation - ``my_operation()`` - which raises two +Warnings: + +.. doctest:: filtering_warnings + + >>> my_operation() + ... + iris/coord_systems.py:445: IrisUserWarning: Setting inverse_flattening does not affect other properties of the GeogCS object. To change other properties set them explicitly or create a new GeogCS instance. + warnings.warn(wmsg, category=iris.warnings.IrisUserWarning) + iris/coord_systems.py:771: IrisDefaultingWarning: Discarding false_easting and false_northing that are not used by Cartopy. + warnings.warn( + +Warnings can be suppressed using the Python warnings filter with the ``ignore`` +action. Detailed information is available in the Python documentation: +:external+python:mod:`warnings`. + +The key points are: + +- :ref:`When`: a warnings filter can be applied + either from the command line or from within Python. +- :ref:`What`: a warnings filter accepts + various arguments to specify which Warnings are being filtered. Both broad + and narrow filters are possible. + +.. _warning-filter-application: + +**When** a Warnings Filter can be Applied +----------------------------------------- + +- **Command line:** setting the :external+python:envvar:`PYTHONWARNINGS` + environment variable. +- **Command line:** the `python -W `_ + command line argument. +- **Within Python:** use :func:`warnings.filterwarnings` . + +The :ref:`warning-filter-specificity` section demonstrates using +:func:`warnings.filterwarnings`, and shows the equivalent **command line** +approaches. + + +.. _warning-filter-specificity: + +**What** Warnings will be Filtered +---------------------------------- + +.. 
note:: + + For all of these examples we are using the + :class:`~warnings.catch_warnings` context manager to ensure any changes to + settings are temporary. + + This should always work fine for the ``ignore`` + warning filter action, but note that some of the other actions + may not behave correctly with all Iris operations, as + :class:`~warnings.catch_warnings` is not thread-safe (e.g. using the + ``once`` action may cause 1 warning per chunk of lazy data). + +Specific Warnings +~~~~~~~~~~~~~~~~~ + +**When you do not want a specific warning, but still want all others.** + +You can target specific Warning messages, e.g. + +.. doctest:: filtering_warnings + + >>> with warnings.catch_warnings(): + ... warnings.filterwarnings("ignore", message="Discarding false_easting") + ... my_operation() + ... + iris/coord_systems.py:445: IrisUserWarning: Setting inverse_flattening does not affect other properties of the GeogCS object. To change other properties set them explicitly or create a new GeogCS instance. + warnings.warn(wmsg, category=iris.warnings.IrisUserWarning) + +:: + + python -W ignore:"Discarding false_easting" + export PYTHONWARNINGS=ignore:"Discarding false_easting" + +---- + +Or you can target Warnings raised by specific lines of specific modules, e.g. + +.. doctest:: filtering_warnings + + >>> with warnings.catch_warnings(): + ... warnings.filterwarnings("ignore", module="iris.coord_systems", lineno=445) + ... my_operation() + ... + iris/coord_systems.py:771: IrisDefaultingWarning: Discarding false_easting and false_northing that are not used by Cartopy. + warnings.warn( + +:: + + python -W ignore:::iris.coord_systems:445 + export PYTHONWARNINGS=ignore:::iris.coord_systems:445 + +Warnings from a Common Source +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +**When you do not want ANY warnings raised by a module, or collection of +modules.** + +E.g. filtering the ``coord_systems`` module: + +.. doctest:: filtering_warnings + + >>> with warnings.catch_warnings(): + ... warnings.filterwarnings("ignore", module="iris.coord_systems") + ... my_operation() + +:: + + python -W ignore:::iris.coord_systems + export PYTHONWARNINGS=ignore:::iris.coord_systems + +---- + +If using :func:`warnings.filterwarnings` , you can also use partial +definitions. The below example will ``ignore`` all Warnings from ``iris`` as a +whole. + +.. doctest:: filtering_warnings + + >>> with warnings.catch_warnings(): + ... warnings.filterwarnings("ignore", module="iris") + ... my_operation() + +The above 'partial' filter is not available with the command line approaches. + +Warnings of a Common Type +~~~~~~~~~~~~~~~~~~~~~~~~~ + +**When you do not want any Warnings of the same nature, from anywhere in the +code you are calling.** + +The below example will ``ignore`` any +:class:`~iris.warnings.IrisDefaultingWarning` that gets raised by *any* +module during execution: + +.. doctest:: filtering_warnings + + >>> with warnings.catch_warnings(): + ... warnings.filterwarnings( + ... "ignore", + ... category=iris.warnings.IrisDefaultingWarning + ... ) + ... my_operation() + ... + iris/coord_systems.py:445: IrisUserWarning: Setting inverse_flattening does not affect other properties of the GeogCS object. To change other properties set them explicitly or create a new GeogCS instance. 
+      warnings.warn(wmsg, category=iris.warnings.IrisUserWarning)
+
+----
+
+Using :class:`~iris.warnings.IrisUserWarning` in the filter will ``ignore``
+both Warnings, since :class:`~iris.warnings.IrisDefaultingWarning` subclasses
+:class:`~iris.warnings.IrisUserWarning` :
+
+.. doctest:: filtering_warnings
+
+    >>> with warnings.catch_warnings():
+    ...     warnings.filterwarnings(
+    ...         "ignore",
+    ...         category=iris.warnings.IrisUserWarning
+    ...     )
+    ...     my_operation()
+
+----
+
+The command line approaches can only handle the built-in Warning
+categories (`cpython#66733`_)::
+
+    python -W ignore::UserWarning
+    export PYTHONWARNINGS=ignore::UserWarning
+
+----
+
+There are several built-in Python warning categories that can be used here
+(:class:`DeprecationWarning` being a popular example, see
+:external+python:mod:`warnings` for more). Since Iris has
+so many different warnings that might be raised, Iris subclasses
+:class:`UserWarning` to :class:`~iris.warnings.IrisUserWarning`, which itself
+has **many** specialised subclasses. These subclasses exist to give you more
+granularity in your warning filtering; you can see the full list by
+viewing the :mod:`iris.warnings` module.
+
+.. attention::
+
+    If you have ideas for adding/altering Iris' warning categories, please
+    :ref:`get in touch`! The categories exist to
+    make your life easier, and it is simple to make modifications.
+
+
+More Detail
+-----------
+
+Different people use Iris for very different purposes, from quick file
+visualisation to extract-transform-load to statistical analysis. These
+contrasting priorities mean disagreement on which Iris problems can be ignored
+and which are critically important.
+
+For problems that prevent Iris functioning: **Concrete Exceptions** are raised, which
+stop code from running any further - no debate here. For less catastrophic
+problems: **Warnings** are raised,
+which notify you (in ``stderr``) but allow code to continue running. The Warnings are
+there because Iris may **OR may not** function in the way you expect,
+depending on what you need - e.g. a problem might prevent data being saved to
+NetCDF, but statistical analysis will still work fine.
+
+Examples of Iris Warnings
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+- If you attempt to plot un-bounded point data as a ``pcolormesh``: Iris will
+  guess appropriate bounds around each point so that quadrilaterals can be
+  plotted. This permanently modifies the relevant coordinates, so you are
+  warned in case downstream operations assume un-bounded coordinates.
+- If you load a NetCDF file where a CF variable references another variable -
+  e.g. ``my_var:coordinates = "depth_var" ;`` - but the referenced variable
+  (``depth_var``) is not in the file: Iris will still construct
+  its data model, but without this reference relationship. You are warned since
+  the file includes an error and the loaded result might therefore not be as
+  expected.
+
+
+.. testcleanup:: filtering_warnings
+
+    warnings.filterwarnings("ignore")
+
+
+.. _cpython#66733: https://github.com/python/cpython/issues/66733
diff --git a/docs/src/further_topics/index.rst b/docs/src/further_topics/index.rst
new file mode 100644
index 0000000000..73ce3d55e7
--- /dev/null
+++ b/docs/src/further_topics/index.rst
@@ -0,0 +1,21 @@
+.. _further_topics_index:
+
+
+Further Topics
+===============
+
+Extra information on specific technical issues.
+
+.. toctree::
+   :maxdepth: 1
+
+   filtering_warnings
+   metadata
+   lenient_metadata
+   lenient_maths
+   um_files_loading
+   missing_data_handling
+   netcdf_io
+   dask_best_practices/index
+   ugrid/index
+   which_regridder_to_use
\ No newline at end of file
diff --git a/docs/src/further_topics/lenient_maths.rst b/docs/src/further_topics/lenient_maths.rst
new file mode 100644
index 0000000000..51f77fb956
--- /dev/null
+++ b/docs/src/further_topics/lenient_maths.rst
@@ -0,0 +1,281 @@
+.. _lenient maths:
+
+Lenient Cube Maths
+******************
+
+This section provides an overview of lenient cube maths. In particular, it explains
+what lenient maths involves, clarifies how it differs from normal or strict cube
+maths, and demonstrates how you can exercise fine control over whether your cube
+maths operations are lenient or strict.
+
+Note that lenient cube maths is the default behaviour of Iris from version
+``3.0.0``.
+
+
+Introduction
+============
+
+Lenient maths stands somewhat on the shoulders of giants. If you've not already
+done so, you may want to recap the material discussed in the following sections,
+
+- :ref:`cube maths`,
+- :ref:`metadata`,
+- :ref:`lenient metadata`
+
+In addition to this, cube maths leans heavily on the :mod:`~iris.common.resolve`
+module, which provides the necessary infrastructure required by Iris to analyse
+and combine each :class:`~iris.cube.Cube` operand involved in a maths operation
+into the resultant :class:`~iris.cube.Cube`. It may be worthwhile investing
+some time to understand how the :class:`~iris.common.resolve.Resolve` class
+underpins cube maths, and consider how it may be used in general to combine
+or resolve cubes together.
+
+Given these prerequisites, recall that :ref:`lenient behaviour`
+introduced and discussed the concept of lenient metadata; a more pragmatic and
+forgiving approach to :ref:`comparing `, :ref:`combining ` and understanding the
+:ref:`differences ` between your metadata
+(:ref:`metadata members table`). The lenient metadata philosophy introduced
+there is extended to cube maths, with the view to also preserving as much common
+coordinate (:ref:`metadata classes table`) information, as well as common
+metadata, between the participating :class:`~iris.cube.Cube` operands as possible.
+
+Let's consolidate our understanding of lenient and strict cube maths through
+a practical worked example, which we'll explore together next.
+
+
+.. _lenient example:
+
+Lenient Example
+===============
+
+.. testsetup:: lenient-example
+
+    import iris
+    from iris.common import LENIENT
+    experiment = iris.load_cube(iris.sample_data_path("hybrid_height.nc"), "air_potential_temperature")
+    control = experiment[0]
+    control.remove_aux_factory(control.aux_factory())
+    for coord in ["sigma", "forecast_reference_time", "forecast_period", "atmosphere_hybrid_height_coordinate", "surface_altitude"]:
+        control.remove_coord(coord)
+    control.attributes["Conventions"] = "CF-1.7"
+    experiment.attributes["experiment-id"] = "RT3 50"
+
+Consider the following :class:`~iris.cube.Cube` of ``air_potential_temperature``,
+which has an `atmosphere hybrid height parametric vertical coordinate`_, and
+represents the output of a low-resolution global atmospheric ``experiment``,
+
+.. doctest:: lenient-example
+
+    >>> print(experiment)
+    air_potential_temperature / (K)             (model_level_number: 15; grid_latitude: 100; grid_longitude: 100)
+        Dimension coordinates:
+            model_level_number                      x                 -                    -
+            grid_latitude                           -                 x                    -
+            grid_longitude                          -                 -                    x
+        Auxiliary coordinates:
+            atmosphere_hybrid_height_coordinate     x                 -                    -
+            sigma                                   x                 -                    -
+            surface_altitude                        -                 x                    x
+        Derived coordinates:
+            altitude                                x                 x                    x
+        Scalar coordinates:
+            forecast_period             0.0 hours
+            forecast_reference_time     2009-09-09 17:10:00
+            time                        2009-09-09 17:10:00
+        Attributes:
+            Conventions                 'CF-1.5'
+            STASH                       m01s00i004
+            experiment-id               'RT3 50'
+            source                      'Data from Met Office Unified Model 7.04'
+
+Consider also the following :class:`~iris.cube.Cube`, which has the same global
+spatial extent, and acts as a ``control``,
+
+.. doctest:: lenient-example
+
+    >>> print(control)
+    air_potential_temperature / (K)     (grid_latitude: 100; grid_longitude: 100)
+        Dimension coordinates:
+            grid_latitude                      x                    -
+            grid_longitude                     -                    x
+        Scalar coordinates:
+            model_level_number          1
+            time                        2009-09-09 17:10:00
+        Attributes:
+            Conventions                 'CF-1.7'
+            STASH                       m01s00i004
+            source                      'Data from Met Office Unified Model 7.04'
+
+Now let's subtract these cubes in order to calculate a simple ``difference``,
+
+.. doctest:: lenient-example
+
+    >>> difference = experiment - control
+    >>> print(difference)
+    unknown / (K)                               (model_level_number: 15; grid_latitude: 100; grid_longitude: 100)
+        Dimension coordinates:
+            model_level_number                      x                 -                    -
+            grid_latitude                           -                 x                    -
+            grid_longitude                          -                 -                    x
+        Auxiliary coordinates:
+            atmosphere_hybrid_height_coordinate     x                 -                    -
+            sigma                                   x                 -                    -
+            surface_altitude                        -                 x                    x
+        Derived coordinates:
+            altitude                                x                 x                    x
+        Scalar coordinates:
+            forecast_period             0.0 hours
+            forecast_reference_time     2009-09-09 17:10:00
+            time                        2009-09-09 17:10:00
+        Attributes:
+            experiment-id               'RT3 50'
+            source                      'Data from Met Office Unified Model 7.04'
+
+Note that cube maths automatically takes care of broadcasting the
+dimensionality of the ``control`` up to that of the ``experiment``, in order to
+calculate the ``difference``. This is performed only after ensuring that both
+the **dimension coordinates** ``grid_latitude`` and ``grid_longitude`` are first
+:ref:`leniently equivalent <lenient equality>`.
+
+As expected, the resultant ``difference`` contains the
+:class:`~iris.aux_factory.HybridHeightFactory` and all its associated **auxiliary
+coordinates**. However, the **scalar coordinates** have been leniently combined to
+preserve as much coordinate information as possible, and the ``attributes``
+dictionaries have also been leniently combined. In addition, see what further
+:ref:`rationalisation <sanitise metadata>` is always performed by cube maths on
+the resultant metadata and coordinates.
+
+Also, note that the ``model_level_number`` **scalar coordinate** from the
+``control`` has been superseded by the similarly named **dimension coordinate**
+from the ``experiment`` in the resultant ``difference``.
+
+Now let's compare and contrast this lenient result with the strict alternative.
+But before we do so, let's first clarify how to control the behaviour of cube maths.
+
+
+Control the Behaviour
+=====================
+
+As stated earlier, lenient cube maths is the default behaviour from Iris ``3.0.0``.
+However, this behaviour may be controlled via the thread-safe ``LENIENT["maths"]``
+runtime option,
+
+.. doctest:: lenient-example
+
+    >>> from iris.common import LENIENT
+    >>> print(LENIENT)
+    Lenient(maths=True)
+
+This may be set and applied globally thereafter for Iris within the current
+thread of execution,
+
+.. doctest:: lenient-example
+
+    >>> LENIENT["maths"] = False  # doctest: +SKIP
+    >>> print(LENIENT)  # doctest: +SKIP
+    Lenient(maths=False)
+
+Or alternatively, temporarily alter the behaviour of cube maths only within the
+scope of the ``LENIENT`` `context manager`_,
+
+.. doctest:: lenient-example
+
+    >>> print(LENIENT)
+    Lenient(maths=True)
+    >>> with LENIENT.context(maths=False):
+    ...     print(LENIENT)
+    ...
+    Lenient(maths=False)
+    >>> print(LENIENT)
+    Lenient(maths=True)
+
+
+Strict Example
+==============
+
+Now that we know how to control the underlying behaviour of cube maths,
+let's return to our :ref:`lenient example`, but this
+time perform **strict** cube maths instead,
+
+.. doctest:: lenient-example
+
+    >>> with LENIENT.context(maths=False):
+    ...     difference = experiment - control
+    ...
+    >>> print(difference)
+    unknown / (K)                               (model_level_number: 15; grid_latitude: 100; grid_longitude: 100)
+        Dimension coordinates:
+            model_level_number                      x                 -                    -
+            grid_latitude                           -                 x                    -
+            grid_longitude                          -                 -                    x
+        Auxiliary coordinates:
+            atmosphere_hybrid_height_coordinate     x                 -                    -
+            sigma                                   x                 -                    -
+            surface_altitude                        -                 x                    x
+        Derived coordinates:
+            altitude                                x                 x                    x
+        Scalar coordinates:
+            time                        2009-09-09 17:10:00
+        Attributes:
+            source                      'Data from Met Office Unified Model 7.04'
+
+Although the numerical result of this strict cube maths operation is identical,
+it is not as rich in metadata as the :ref:`lenient alternative <lenient example>`.
+In particular, it does not contain the ``forecast_period`` and ``forecast_reference_time``
+**scalar coordinates**, or the ``experiment-id`` in the ``attributes`` dictionary.
+
+This is because strict cube maths, in general, will only return common metadata
+and common coordinates that are :ref:`strictly equivalent <strict equality table>`.
+
+
+Finer Detail
+============
+
+In general, if you want to preserve as much metadata and coordinate information as
+possible during cube maths, then opt to use the default lenient behaviour. Otherwise,
+favour the strict alternative if you need to enforce precise metadata and
+coordinate commonality.
+
+The following information may also help you decide whether lenient cube maths best
+suits your use case,
+
+- lenient behaviour uses :ref:`lenient equality` to match the
+  metadata of coordinates, which is more tolerant to certain metadata differences,
+- lenient behaviour uses :ref:`lenient combination` to create
+  the metadata of coordinates on the resultant :class:`~iris.cube.Cube`,
+- lenient behaviour will attempt to cover each dimension with a :class:`~iris.coords.DimCoord`
+  in the resultant :class:`~iris.cube.Cube`, even though only one :class:`~iris.cube.Cube`
+  operand may describe that dimension,
+- lenient behaviour will attempt to include **auxiliary coordinates** in the
+  resultant :class:`~iris.cube.Cube` that exist on only one :class:`~iris.cube.Cube`
+  operand,
+- lenient behaviour will attempt to include **scalar coordinates** in the
+  resultant :class:`~iris.cube.Cube` that exist on only one :class:`~iris.cube.Cube`
+  operand,
+- lenient behaviour will add a coordinate to the resultant :class:`~iris.cube.Cube`
+  with **bounds**, even if only one of the associated matching coordinates from the
+  :class:`~iris.cube.Cube` operands has **bounds**,
+- strict and lenient behaviour both require that the **points** and **bounds** of
+  matching coordinates from :class:`~iris.cube.Cube` operands must be strictly
+  equivalent.
+  However, mismatching **bounds** of **scalar coordinates** are ignored
+  i.e., a scalar coordinate that is common to both :class:`~iris.cube.Cube` operands, with
+  equivalent **points** but different **bounds**, will be added to the resultant
+  :class:`~iris.cube.Cube` but with **no bounds**
+
+.. _sanitise metadata:
+
+Additionally, cube maths will always perform the following rationalisation of the
+resultant :class:`~iris.cube.Cube`,
+
+- clear the ``standard_name``, ``long_name`` and ``var_name``, defaulting the
+  :meth:`~iris.common.mixin.CFVariableMixin.name` to ``unknown``,
+- clear the :attr:`~iris.cube.Cube.cell_methods`,
+- clear the :meth:`~iris.cube.Cube.cell_measures`,
+- clear the :meth:`~iris.cube.Cube.ancillary_variables`,
+- clear the ``STASH`` key from the :attr:`~iris.cube.Cube.attributes` dictionary,
+- assign the appropriate :attr:`~iris.common.mixin.CFVariableMixin.units`
+
+
+.. _atmosphere hybrid height parametric vertical coordinate: https://cfconventions.org/Data/cf-conventions/cf-conventions-1.8/cf-conventions.html#atmosphere-hybrid-height-coordinate
+.. _context manager: https://docs.python.org/3/library/contextlib.html
diff --git a/docs/src/further_topics/lenient_metadata.rst b/docs/src/further_topics/lenient_metadata.rst
new file mode 100644
index 0000000000..5de9ad70c4
--- /dev/null
+++ b/docs/src/further_topics/lenient_metadata.rst
@@ -0,0 +1,476 @@
+.. _lenient metadata:
+
+Lenient Metadata
+****************
+
+This section discusses lenient metadata; what it is, what it means, and how you
+can perform **lenient** rather than **strict** operations with your metadata.
+
+
+Introduction
+============
+
+As discussed in :ref:`metadata`, a rich, common metadata API is available within
+Iris that supports metadata :ref:`equality `, :ref:`difference `, :ref:`combination `,
+and also :ref:`conversion `.
+
+The common metadata API is implemented through the ``metadata`` property
+on each of the Iris `CF Conventions`_ class containers
+(:ref:`metadata classes table`), and provides a common gateway for users to
+easily manage and manipulate their metadata in a consistent and unified way.
+
+This is primarily all thanks to the metadata classes (:ref:`metadata classes table`)
+that support the necessary state and behaviour required by the common metadata
+API. Namely, it is the ``equal`` (``__eq__``), ``difference`` and ``combine``
+methods that provide this rich metadata behaviour, all of which are explored
+more fully in :ref:`metadata`.
+
+
+Strict Behaviour
+================
+
+.. testsetup:: strict-behaviour
+
+    import iris
+    cube = iris.load_cube(iris.sample_data_path("A1B_north_america.nc"))
+    latitude = cube.coord("latitude")
+
+The feature that is common between the ``equal``, ``difference`` and
+``combine`` metadata class methods is that they all perform **strict**
+metadata member comparisons **by default**.
+
+The **strict** behaviour implemented by these methods can be summarised
+as follows, where ``X`` and ``Y`` are any objects that are non-identical,
+
+.. _strict equality table:
+.. table:: - :ref:`Strict equality `
+    :widths: auto
+    :align: center
+
+    ======== ======== =========
+    Left     Right    ``equal``
+    ======== ======== =========
+    ``X``    ``Y``    ``False``
+    ``Y``    ``X``    ``False``
+    ``X``    ``X``    ``True``
+    ``X``    ``None`` ``False``
+    ``None`` ``X``    ``False``
+    ======== ======== =========
+
+.. _strict difference table:
+.. table:: - :ref:`Strict difference `
+    :widths: auto
+    :align: center
+
+    ======== ======== =================
+    Left     Right    ``difference``
+    ======== ======== =================
+    ``X``    ``Y``    (``X``, ``Y``)
+    ``Y``    ``X``    (``Y``, ``X``)
+    ``X``    ``X``    ``None``
+    ``X``    ``None`` (``X``, ``None``)
+    ``None`` ``X``    (``None``, ``X``)
+    ======== ======== =================
+
+.. _strict combine table:
+.. table:: - :ref:`Strict combination `
+    :widths: auto
+    :align: center
+
+    ======== ======== ===========
+    Left     Right    ``combine``
+    ======== ======== ===========
+    ``X``    ``Y``    ``None``
+    ``Y``    ``X``    ``None``
+    ``X``    ``X``    ``X``
+    ``X``    ``None`` ``None``
+    ``None`` ``X``    ``None``
+    ======== ======== ===========
+
+.. _strict example:
+
+This type of **strict** behaviour does offer obvious benefit and value. However,
+it can be unnecessarily restrictive. For example, consider the metadata of the
+following ``latitude`` coordinate,
+
+.. doctest:: strict-behaviour
+
+    >>> latitude.metadata
+    DimCoordMetadata(standard_name='latitude', long_name=None, var_name='latitude', units=Unit('degrees'), attributes={}, coord_system=GeogCS(6371229.0), climatological=False, circular=False)
+
+Now, let's create a doctored version of this metadata with a different ``var_name``,
+
+.. doctest:: strict-behaviour
+
+    >>> metadata = latitude.metadata._replace(var_name=None)
+    >>> metadata
+    DimCoordMetadata(standard_name='latitude', long_name=None, var_name=None, units=Unit('degrees'), attributes={}, coord_system=GeogCS(6371229.0), climatological=False, circular=False)
+
+Clearly, these metadata are different,
+
+.. doctest:: strict-behaviour
+
+    >>> metadata != latitude.metadata
+    True
+    >>> metadata.difference(latitude.metadata)
+    DimCoordMetadata(standard_name=None, long_name=None, var_name=(None, 'latitude'), units=None, attributes=None, coord_system=None, climatological=None, circular=None)
+
+And yet, they both have the same ``name``, which some may find slightly confusing
+(see :meth:`~iris.common.metadata.BaseMetadata.name` for clarification)
+
+.. doctest:: strict-behaviour
+
+    >>> metadata.name()
+    'latitude'
+    >>> latitude.name()
+    'latitude'
+
+This metadata inequality can only be overcome by ensuring that each
+metadata member precisely matches.
+
+If your workflow demands such metadata rigour, then the default strict behaviour
+of the common metadata API will satisfy your needs. Typically though, such
+strictness is not necessary, and as of Iris ``3.0.0`` an alternative, more
+practical behaviour is available.
+
+
+.. _lenient behaviour:
+
+Lenient Behaviour
+=================
+
+.. testsetup:: lenient-behaviour
+
+    import iris
+    cube = iris.load_cube(iris.sample_data_path("A1B_north_america.nc"))
+    latitude = cube.coord("latitude")
+
+Lenient metadata aims to offer a practical, common sense alternative to the
+strict rigour of the default Iris metadata behaviour. It is intended to be
+complementary, and suitable for those users with a more relaxed requirement
+regarding their metadata.
+
+The lenient behaviour that is implemented as an alternative to the
+:ref:`strict equality <strict equality table>`, :ref:`strict difference <strict difference table>`,
+and :ref:`strict combination <strict combine table>` can be summarised
+as follows,
+
+.. _lenient equality table:
+.. table:: - Lenient equality
+    :widths: auto
+    :align: center
+
+    ======== ======== =========
+    Left     Right    ``equal``
+    ======== ======== =========
+    ``X``    ``Y``    ``False``
+    ``Y``    ``X``    ``False``
+    ``X``    ``X``    ``True``
+    ``X``    ``None`` ``True``
+    ``None`` ``X``    ``True``
+    ======== ======== =========
+
+.. _lenient difference table:
+.. table:: - Lenient difference
+    :widths: auto
+    :align: center
+
+    ======== ======== =================
+    Left     Right    ``difference``
+    ======== ======== =================
+    ``X``    ``Y``    (``X``, ``Y``)
+    ``Y``    ``X``    (``Y``, ``X``)
+    ``X``    ``X``    ``None``
+    ``X``    ``None`` ``None``
+    ``None`` ``X``    ``None``
+    ======== ======== =================
+
+.. _lenient combine table:
+.. table:: - Lenient combination
+    :widths: auto
+    :align: center
+
+    ======== ======== ===========
+    Left     Right    ``combine``
+    ======== ======== ===========
+    ``X``    ``Y``    ``None``
+    ``Y``    ``X``    ``None``
+    ``X``    ``X``    ``X``
+    ``X``    ``None`` ``X``
+    ``None`` ``X``    ``X``
+    ======== ======== ===========
+
+Lenient behaviour is enabled for the ``equal``, ``difference``, and ``combine``
+metadata class methods via the ``lenient`` keyword argument, which is ``False``
+by default. Let's first explore some examples of lenient equality, difference
+and combination, before going on to clarify which metadata members adopt
+lenient behaviour for each of the metadata classes.
+
+
+.. _lenient equality:
+
+Lenient Equality
+----------------
+
+Lenient equality is enabled using the ``lenient`` keyword argument; therefore,
+we are forced to use the ``equal`` method rather than the ``==`` operator
+(``__eq__``). Otherwise, the ``equal`` method and ``==`` operator are both
+functionally equivalent.
+
+For example, consider the :ref:`previous strict example <strict example>`,
+where two separate ``latitude`` coordinates are compared, each with different
+``var_name`` members,
+
+.. doctest:: strict-behaviour
+
+    >>> metadata.equal(latitude.metadata, lenient=True)
+    True
+
+Unlike strict comparison, lenient comparison is a little more forgiving. In
+this case, leniently comparing **something** with **nothing** (``None``) will
+always be ``True``; it's the graceful compromise to the strict alternative.
+
+So let's take the opportunity to reinforce this a little further before moving on,
+by leniently comparing different ``attributes`` dictionaries; a constant source
+of strict contention.
+
+Firstly, populate the metadata of our ``latitude`` coordinate appropriately,
+
+.. doctest:: lenient-behaviour
+
+    >>> attributes = {"grinning face": "😀", "neutral face": "😐"}
+    >>> latitude.attributes = attributes
+    >>> latitude.metadata  # doctest: +SKIP
+    DimCoordMetadata(standard_name='latitude', long_name=None, var_name='latitude', units=Unit('degrees'), attributes={'grinning face': '😀', 'neutral face': '😐'}, coord_system=GeogCS(6371229.0), climatological=False, circular=False)
+
+Then create another :class:`~iris.common.metadata.DimCoordMetadata` with a different
+``attributes`` `dict`_, namely,
+
+- the ``grinning face`` key is **missing**,
+- the ``neutral face`` key has the **same value**, and
+- the ``upside-down face`` key is **new**
+
+.. doctest:: lenient-behaviour
+
+    >>> attributes = {"neutral face": "😐", "upside-down face": "🙃"}
+    >>> metadata = latitude.metadata._replace(attributes=attributes)
+    >>> metadata  # doctest: +SKIP
+    DimCoordMetadata(standard_name='latitude', long_name=None, var_name='latitude', units=Unit('degrees'), attributes={'neutral face': '😐', 'upside-down face': '🙃'}, coord_system=GeogCS(6371229.0), climatological=False, circular=False)
+
+Now, compare our metadata,
+
+.. doctest:: lenient-behaviour
+
+    >>> metadata.equal(latitude.metadata)
+    False
+    >>> metadata.equal(latitude.metadata, lenient=True)
+    True
+
+Again, lenient equality (:ref:`lenient equality table`) offers a more
+forgiving and practical alternative to strict behaviour.
+
+
+.. _lenient difference:
+
+Lenient Difference
+------------------
+
+Similar to :ref:`lenient equality`, the lenient ``difference`` method
+(:ref:`lenient difference table`) considers there to be no difference between
+comparing **something** with **nothing** (``None``). This working assumption is
+not naively applied to all metadata members, but rather a more pragmatic approach
+is adopted, as discussed later in :ref:`lenient members`.
+
+Again, lenient behaviour for the ``difference`` metadata class method is enabled
+by the ``lenient`` keyword argument. For example, consider again the
+:ref:`previous strict example <strict example>` involving our ``latitude``
+coordinate,
+
+.. doctest:: strict-behaviour
+
+    >>> metadata.difference(latitude.metadata)
+    DimCoordMetadata(standard_name=None, long_name=None, var_name=(None, 'latitude'), units=None, attributes=None, coord_system=None, climatological=None, circular=None)
+    >>> metadata.difference(latitude.metadata, lenient=True) is None
+    True
+
+And revisiting our slightly altered ``attributes`` member comparison example
+brings home the benefits of the lenient difference behaviour. So, given our
+``latitude`` coordinate with its populated ``attributes`` dictionary,
+
+.. doctest:: lenient-behaviour
+
+    >>> latitude.attributes  # doctest: +SKIP
+    {'grinning face': '😀', 'neutral face': '😐'}
+
+We create another :class:`~iris.common.metadata.DimCoordMetadata` with a dissimilar
+``attributes`` member, namely,
+
+- the ``grinning face`` key is **missing**,
+- the ``neutral face`` key has a **different value**, and
+- the ``upside-down face`` key is **new**
+
+.. doctest:: lenient-behaviour
+
+    >>> attributes = {"neutral face": "😜", "upside-down face": "🙃"}
+    >>> metadata = latitude.metadata._replace(attributes=attributes)
+    >>> metadata  # doctest: +SKIP
+    DimCoordMetadata(standard_name='latitude', long_name=None, var_name='latitude', units=Unit('degrees'), attributes={'neutral face': '😜', 'upside-down face': '🙃'}, coord_system=GeogCS(6371229.0), climatological=False, circular=False)
+
+Now comparing the strict and lenient behaviour for the ``difference`` method
+highlights the change in how such dissimilar metadata is treated gracefully,
+
+.. doctest:: lenient-behaviour
+
+    >>> metadata.difference(latitude.metadata).attributes  # doctest: +SKIP
+    {'upside-down face': '🙃', 'neutral face': '😜'}, {'neutral face': '😐', 'grinning face': '😀'}
+    >>> metadata.difference(latitude.metadata, lenient=True).attributes  # doctest: +SKIP
+    {'neutral face': '😜'}, {'neutral face': '😐'}
+
+
+.. _lenient combination:
+
+Lenient Combination
+-------------------
+
+The behaviour of the lenient ``combine`` metadata class method is outlined
+in :ref:`lenient combine table`, and as with :ref:`lenient equality` and
+:ref:`lenient difference` is enabled through the ``lenient`` keyword argument.
+
+The difference in behaviour between **lenient** and
+:ref:`strict combination <strict combine table>` is centred around the lenient
+handling of combining **something** with **nothing** (``None``) to return
+**something**, whereas strict combination will only return a result from
+combining identical objects.
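+
+As a minimal sketch of that distinction (reusing the ``latitude`` coordinate
+from the earlier examples)::
+
+    >>> metadata = latitude.metadata._replace(standard_name=None)
+    >>> print(metadata.combine(latitude.metadata).standard_name)
+    None
+    >>> metadata.combine(latitude.metadata, lenient=True).standard_name
+    'latitude'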
+
+Again, this is best demonstrated through a simple example of attempting to combine
+partially overlapping ``attributes`` member dictionaries. For example, given the
+following ``attributes`` dictionary of our favoured ``latitude`` coordinate,
+
+.. doctest:: lenient-behaviour
+
+    >>> latitude.attributes  # doctest: +SKIP
+    {'grinning face': '😀', 'neutral face': '😐'}
+
+We create another :class:`~iris.common.metadata.DimCoordMetadata` with overlapping
+keys and values, namely,
+
+- the ``grinning face`` key is **missing**,
+- the ``neutral face`` key has the **same value**, and
+- the ``upside-down face`` key is **new**
+
+.. doctest:: lenient-behaviour
+
+    >>> attributes = {"neutral face": "😐", "upside-down face": "🙃"}
+    >>> metadata = latitude.metadata._replace(attributes=attributes)
+    >>> metadata  # doctest: +SKIP
+    DimCoordMetadata(standard_name='latitude', long_name=None, var_name='latitude', units=Unit('degrees'), attributes={'neutral face': '😐', 'upside-down face': '🙃'}, coord_system=GeogCS(6371229.0), climatological=False, circular=False)
+
+Comparing the strict and lenient behaviour of ``combine`` side-by-side
+highlights the difference in behaviour, and the advantages of lenient combination
+for more inclusive, richer metadata,
+
+.. doctest:: lenient-behaviour
+
+    >>> metadata.combine(latitude.metadata).attributes
+    {'neutral face': '😐'}
+    >>> metadata.combine(latitude.metadata, lenient=True).attributes  # doctest: +SKIP
+    {'neutral face': '😐', 'upside-down face': '🙃', 'grinning face': '😀'}
+
+
+.. _lenient members:
+
+Lenient Members
+---------------
+
+:ref:`lenient behaviour` is not applied indiscriminately across all metadata members
+participating in a lenient ``equal``, ``difference`` or ``combine`` operation.
+Rather, a more pragmatic application is employed based on the `CF Conventions`_
+definition of the member, and whether being lenient would result in erroneous
+behaviour or interpretation.
+
+.. _lenient members table:
+.. table:: - Lenient member participation
+    :widths: auto
+    :align: center
+
+    ============================================================================================= ================== ============
+    Metadata Class                                                                                Member             Behaviour
+    ============================================================================================= ================== ============
+    All metadata classes†                                                                         ``standard_name``  ``lenient``‡
+    All metadata classes†                                                                         ``long_name``      ``lenient``‡
+    All metadata classes†                                                                         ``var_name``       ``lenient``‡
+    All metadata classes†                                                                         ``units``          ``strict``
+    All metadata classes†                                                                         ``attributes``     ``lenient``
+    :class:`~iris.common.metadata.CellMeasureMetadata`                                            ``measure``        ``strict``
+    :class:`~iris.common.metadata.CoordMetadata`, :class:`~iris.common.metadata.DimCoordMetadata` ``coord_system``   ``strict``
+    :class:`~iris.common.metadata.CoordMetadata`, :class:`~iris.common.metadata.DimCoordMetadata` ``climatological`` ``strict``
+    :class:`~iris.common.metadata.CubeMetadata`                                                   ``cell_methods``   ``strict``
+    :class:`~iris.common.metadata.DimCoordMetadata`                                               ``circular``       ``strict`` §
+    ============================================================================================= ================== ============
+
+| **Key**
+| † - Applies to all metadata classes including :class:`~iris.common.metadata.AncillaryVariableMetadata`, which has no other specialised members
+| ‡ - See :ref:`special lenient name` for ``standard_name``, ``long_name``, and ``var_name``
+| § - The ``circular`` member is ignored for operations between :class:`~iris.common.metadata.CoordMetadata` and :class:`~iris.common.metadata.DimCoordMetadata`
+
+In summary, only ``standard_name``, ``long_name``, ``var_name`` and the ``attributes``
+members are treated leniently. All other members are considered to represent
+fundamental metadata that cannot, by their nature, be considered equivalent to
+metadata that is missing or ``None``. For example, a :class:`~iris.cube.Cube`
+with ``units`` of ``ms-1`` cannot be considered equivalent to another
+:class:`~iris.cube.Cube` with ``units`` of ``unknown``; this would be a false
+and dangerous scientific assumption to make.
+
+Similar arguments can be made for the ``measure``, ``coord_system``, ``climatological``,
+``cell_methods``, and ``circular`` members, all of which are treated with
+strict behaviour, without exception.
+
+
+.. _special lenient name:
+
+Special Lenient Name Behaviour
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The ``standard_name``, ``long_name`` and ``var_name`` have a closer association
+with each other compared to all other metadata members, as they all
+underpin the functionality provided by the :meth:`~iris.common.mixin.CFVariableMixin.name`
+method. It is imperative that the :meth:`~iris.common.mixin.CFVariableMixin.name`
+derived from metadata remains constant for strict and lenient equality alike.
+
+As such, these metadata members have an additional layer of behaviour enforced
+during :ref:`lenient equality` in order to ensure that the identity or name of
+metadata does not change due to a side-effect of lenient comparison.
+
+For example, if simple :ref:`lenient equality <lenient equality table>`
+behaviour was applied to the ``standard_name``, ``long_name`` and ``var_name``,
+the following would be considered **not** equal,
+
+.. table::
+    :widths: auto
+    :align: center
+
+    ================= ============ ============
+    Member            Left         Right
+    ================= ============ ============
+    ``standard_name`` ``None``     ``latitude``
+    ``long_name``     ``latitude`` ``None``
+    ``var_name``      ``lat``      ``latitude``
+    ================= ============ ============
+
+Both the **Left** and **Right** metadata would have the same
+:meth:`~iris.common.mixin.CFVariableMixin.name` by definition i.e., ``latitude``.
+However, lenient equality would fail due to the difference in ``var_name``.
+
+To account for this, lenient equality is performed by two simple consecutive steps:
+
+- ensure that the result returned by the :meth:`~iris.common.mixin.CFVariableMixin.name`
+  method is the same for the metadata being compared, then
+- only perform :ref:`lenient equality <lenient equality table>` between the
+  ``standard_name`` and ``long_name`` i.e., the ``var_name`` member is **not**
+  compared explicitly, as its value may have been accounted for through
+  :meth:`~iris.common.mixin.CFVariableMixin.name` equality
+
+
+.. _dict: https://docs.python.org/3/library/stdtypes.html#mapping-types-dict
+.. _CF Conventions: https://cfconventions.org/
diff --git a/docs/src/further_topics/metadata.rst b/docs/src/further_topics/metadata.rst
new file mode 100644
index 0000000000..6d32b10b7a
--- /dev/null
+++ b/docs/src/further_topics/metadata.rst
@@ -0,0 +1,1020 @@
+.. _further topics:
+.. _metadata:
+
+Metadata
+********
+
+This section provides a detailed overview of how your metadata is managed
+within Iris. In particular, it discusses what metadata is, where it fits
+into Iris, and more importantly how you can create, access, manipulate,
+and analyse your metadata.
+
+All the finer details covered here may not be entirely relevant to your use
+case, but it's here if you ever need it. In fact, you may want to skip
+straight ahead to :ref:`richer metadata`, and take it from there.
+
+
+Introduction
+============
+
+As discussed in :ref:`iris_data_structures`, Iris draws heavily from the
+`NetCDF CF Metadata Conventions`_ as a source for its data model, thus building
+on the widely recognised and understood terminology defined within those
+`CF Conventions`_ by the scientific community.
+
+In :ref:`iris_data_structures` we introduced several fundamental classes in Iris
+that care about your ``data``, and also your ``metadata`` i.e., `data about data`_.
+These are the :class:`~iris.cube.Cube`, the :class:`~iris.coords.AuxCoord`, and the
+:class:`~iris.coords.DimCoord`, all of which should be familiar to you now. In
+addition to these, Iris models several other classes of `CF Conventions`_
+metadata. Namely,
+
+- the :class:`~iris.coords.AncillaryVariable`, see `Ancillary Data`_ and `Flags`_,
+- the :class:`~iris.coords.CellMeasure`, see `Cell Measures`_,
+- the :class:`~iris.aux_factory.AuxCoordFactory`, see `Parametric Vertical Coordinate`_
+
+Collectively, the aforementioned classes will be known here as the Iris
+`CF Conventions`_ classes.
+
+.. hint::
+
+    If there are any `CF Conventions`_ metadata missing from Iris that you
+    care about, then please let us know by raising a :issue:`GitHub Issue`
+    on `SciTools/iris`_
+
+
+Common Metadata
+===============
+
+Each of the Iris `CF Conventions`_ classes uses **metadata** to define it and
+give it meaning.
+
+The **metadata** used to define an Iris `CF Conventions`_ class is composed of
+individual **metadata members**, almost all of which reference specific
+`CF Conventions`_ terms. The individual metadata members used to define each of
+the Iris `CF Conventions`_ classes are shown in :ref:`metadata members table`.
The individual metadata members used to define each of +the Iris `CF Conventions`_ classes are shown in :ref:`metadata members table`. + +As :ref:`metadata members table` highlights, **specific** metadata is used to +define and represent each Iris `CF Conventions`_ class. This means that metadata +alone, can be used to easily **identify**, **compare** and **differentiate** +between individual class instances. + +For example, the collective metadata used to define an +:class:`~iris.coords.AncillaryVariable` are the ``standard_name``, ``long_name``, +``var_name``, ``units``, and ``attributes`` members. Note that, these are the +actual `data attribute`_ names of the metadata members on the Iris class. + + +.. _metadata members table: +.. table:: Iris classes that model `CF Conventions`_ metadata + :widths: auto + :align: center + + =================== ======================================= ============================== ========================================== ================================= ======================== ============================== + Metadata Members :class:`~iris.coords.AncillaryVariable` :class:`~iris.coords.AuxCoord` :class:`~iris.aux_factory.AuxCoordFactory` :class:`~iris.coords.CellMeasure` :class:`~iris.cube.Cube` :class:`~iris.coords.DimCoord` + =================== ======================================= ============================== ========================================== ================================= ======================== ============================== + ``standard_name`` ✔ ✔ ✔ ✔ ✔ ✔ + ``long_name`` ✔ ✔ ✔ ✔ ✔ ✔ + ``var_name`` ✔ ✔ ✔ ✔ ✔ ✔ + ``units`` ✔ ✔ ✔ ✔ ✔ ✔ + ``attributes`` ✔ ✔ ✔ ✔ ✔ ✔ + ``coord_system`` ✔ ✔ ✔ + ``climatological`` ✔ ✔ ✔ + ``measure`` ✔ + ``cell_methods`` ✔ + ``circular`` ✔ + =================== ======================================= ============================== ========================================== ================================= ======================== ============================== + +.. note:: + + The :attr:`~iris.coords.DimCoord.var_name` and :attr:`~iris.coords.DimCoord.circular` + metadata members are Iris specific terms, rather than recognised `CF Conventions`_ + terms. + +.. note:: + + :class:`~iris.cube.Cube` :attr:`~iris.cube.Cube.attributes` implement the + concept of dataset-level and variable-level attributes, to enable correct + NetCDF loading and saving (see :class:`~iris.cube.CubeAttrsDict` and NetCDF + :func:`~iris.fileformats.netcdf.saver.save` for more). ``attributes`` on + the other classes do not have this distinction, but the ``attributes`` + members of ALL the classes still have the same interface, and can be + compared. + + +Common Metadata API +=================== + +.. testsetup:: + + import iris + cube = iris.load_cube(iris.sample_data_path("A1B_north_america.nc")) + +As of Iris ``3.0.0``, a unified treatment of metadata has been applied +across each Iris class (:ref:`metadata members table`) to allow users +to easily manage and manipulate their metadata in a consistent way. + +This is achieved through the ``metadata`` property, which allows you to +manipulate the associated underlying metadata members as a collective. 
+
+For example, given the following :class:`~iris.cube.Cube`,
+
+    >>> print(cube)
+    air_temperature / (K)               (time: 240; latitude: 37; longitude: 49)
+        Dimension coordinates:
+            time                             x             -              -
+            latitude                         -             x              -
+            longitude                        -             -              x
+        Auxiliary coordinates:
+            forecast_period                  x             -              -
+        Scalar coordinates:
+            forecast_reference_time     1859-09-01 06:00:00
+            height                      1.5 m
+        Cell methods:
+            0                           time: mean (interval: 6 hour)
+        Attributes:
+            Conventions                 'CF-1.5'
+            Model scenario              'A1B'
+            STASH                       m01s03i236
+            source                      'Data from Met Office Unified Model 6.05'
+
+We can easily get all of the associated metadata of the :class:`~iris.cube.Cube`
+using the ``metadata`` property (note the specialised
+:class:`~iris.cube.CubeAttrsDict` for the :attr:`~iris.cube.Cube.attributes`,
+as mentioned earlier):
+
+    >>> cube.metadata
+    CubeMetadata(standard_name='air_temperature', long_name=None, var_name='air_temperature', units=Unit('K'), attributes=CubeAttrsDict(globals={'Conventions': 'CF-1.5'}, locals={'STASH': STASH(model=1, section=3, item=236), 'Model scenario': 'A1B', 'source': 'Data from Met Office Unified Model 6.05'}), cell_methods=(CellMethod(method='mean', coord_names=('time',), intervals=('6 hour',), comments=()),))
+
+We can also inspect the ``metadata`` of the ``longitude``
+:class:`~iris.coords.DimCoord` attached to the :class:`~iris.cube.Cube` in the same way:
+
+    >>> cube.coord("longitude").metadata
+    DimCoordMetadata(standard_name='longitude', long_name=None, var_name='longitude', units=Unit('degrees'), attributes={}, coord_system=GeogCS(6371229.0), climatological=False, circular=False)
+
+Or use the ``metadata`` property again, but this time on the ``forecast_period``
+:class:`~iris.coords.AuxCoord` attached to the :class:`~iris.cube.Cube`:
+
+    >>> cube.coord("forecast_period").metadata
+    CoordMetadata(standard_name='forecast_period', long_name=None, var_name='forecast_period', units=Unit('hours'), attributes={}, coord_system=None, climatological=False)
+
+Note that the ``metadata`` property is available on each of the Iris `CF Conventions`_
+class containers referenced in :ref:`metadata members table`, and thus provides
+a **common** and **consistent** approach to managing your metadata, which we'll
+now explore a little more fully.
+
+
+Metadata Classes
+----------------
+
+The ``metadata`` property will return an appropriate `namedtuple`_ metadata class
+for each Iris `CF Conventions`_ class container. The metadata class returned by
+each container class is shown in :ref:`metadata classes table` below:
+
+.. _metadata classes table:
+..
table:: Iris namedtuple metadata classes
+   :widths: auto
+   :align: center
+
+   ========================================== ========================================================
+   Container Class                            Metadata Class
+   ========================================== ========================================================
+   :class:`~iris.coords.AncillaryVariable`    :class:`~iris.common.metadata.AncillaryVariableMetadata`
+   :class:`~iris.coords.AuxCoord`             :class:`~iris.common.metadata.CoordMetadata`
+   :class:`~iris.aux_factory.AuxCoordFactory` :class:`~iris.common.metadata.CoordMetadata`
+   :class:`~iris.coords.CellMeasure`          :class:`~iris.common.metadata.CellMeasureMetadata`
+   :class:`~iris.cube.Cube`                   :class:`~iris.common.metadata.CubeMetadata`
+   :class:`~iris.coords.DimCoord`             :class:`~iris.common.metadata.DimCoordMetadata`
+   ========================================== ========================================================
+
+Akin to the behaviour of a `namedtuple`_, the metadata classes in
+:ref:`metadata classes table` create **tuple-like** instances i.e., they provide a
+**snapshot** of the associated metadata member **values**, which are **not
+settable**, but they **may be mutable** depending on the data-type of the member.
+For example, given the following ``metadata`` of a :class:`~iris.coords.DimCoord`,
+
+    >>> longitude = cube.coord("longitude")
+    >>> metadata = longitude.metadata
+    >>> metadata
+    DimCoordMetadata(standard_name='longitude', long_name=None, var_name='longitude', units=Unit('degrees'), attributes={}, coord_system=GeogCS(6371229.0), climatological=False, circular=False)
+
+The ``metadata`` member value **is** the same as the container class member value,
+
+    >>> metadata.attributes is longitude.attributes
+    True
+    >>> metadata.circular is longitude.circular
+    True
+
+Like a `namedtuple`_, the ``metadata`` member is **not settable**,
+
+    >>> metadata.attributes = {"grinning face": "🙂"}
+    Traceback (most recent call last):
+    AttributeError: can't set attribute
+
+However, for a `dict`_ member, it **is mutable**,
+
+    >>> metadata.attributes
+    {}
+    >>> longitude.attributes["grinning face"] = "🙂"
+    >>> metadata.attributes
+    {'grinning face': '🙂'}
+    >>> metadata.attributes["grinning face"] = "🙃"
+    >>> longitude.attributes
+    {'grinning face': '🙃'}
+
+But ``metadata`` members with simple values are **not** mutable,
+
+    >>> metadata.circular
+    False
+    >>> longitude.circular = True
+    >>> metadata.circular
+    False
+
+And of course, they're also **not** settable,
+
+    >>> metadata.circular = True
+    Traceback (most recent call last):
+    AttributeError: can't set attribute
+
+Note that the ``metadata`` property re-creates a **new** instance per invocation,
+with a **snapshot** of the container class metadata values at that point in time,
+
+    >>> longitude.metadata
+    DimCoordMetadata(standard_name='longitude', long_name=None, var_name='longitude', units=Unit('degrees'), attributes={'grinning face': '🙃'}, coord_system=GeogCS(6371229.0), climatological=False, circular=True)
+
+Skip ahead to :ref:`metadata assignment` for a fuller
+discussion of the options for how to **set** and **get** metadata on the instance of
+an Iris `CF Conventions`_ container class (:ref:`metadata classes table`).
+
+
+Metadata Class Behaviour
+------------------------
+
+As mentioned previously, the metadata classes in :ref:`metadata classes table`
+inherit the behaviour of a `namedtuple`_, and so act and feel like a `namedtuple`_,
+just as you might expect. 
For example, given the following ``metadata``,
+
+    >>> metadata
+    DimCoordMetadata(standard_name='longitude', long_name=None, var_name='longitude', units=Unit('degrees'), attributes={'grinning face': '🙃'}, coord_system=GeogCS(6371229.0), climatological=False, circular=False)
+
+We can use the `namedtuple._make`_ method to create a **new**
+:class:`~iris.common.metadata.DimCoordMetadata` instance from an existing sequence
+or iterable. The number and order of the values used in the iterable must match that
+of the associated `namedtuple._fields`_, which is discussed later,
+
+    >>> values = (1, 2, 3, 4, 5, 6, 7, 8)
+    >>> metadata._make(values)
+    DimCoordMetadata(standard_name=1, long_name=2, var_name=3, units=4, attributes=5, coord_system=6, climatological=7, circular=8)
+
+Note that `namedtuple._make`_ is a class method, and so it is possible to
+create a **new** instance directly from the metadata class itself,
+
+    >>> from iris.common import DimCoordMetadata
+    >>> DimCoordMetadata._make(values)
+    DimCoordMetadata(standard_name=1, long_name=2, var_name=3, units=4, attributes=5, coord_system=6, climatological=7, circular=8)
+
+It is also possible to easily convert ``metadata`` to a `dict`_
+using the `namedtuple._asdict`_ method. This can be particularly handy when a
+standard Python built-in container is required to represent your ``metadata``,
+
+    >>> metadata._asdict()
+    {'standard_name': 'longitude', 'long_name': None, 'var_name': 'longitude', 'units': Unit('degrees'), 'attributes': {'grinning face': '🙃'}, 'coord_system': GeogCS(6371229.0), 'climatological': False, 'circular': False}
+
+Using the `namedtuple._replace`_ method allows you to create a new metadata
+class instance, replacing specified members with **new** associated values,
+
+    >>> metadata
+    DimCoordMetadata(standard_name='longitude', long_name=None, var_name='longitude', units=Unit('degrees'), attributes={'grinning face': '🙃'}, coord_system=GeogCS(6371229.0), climatological=False, circular=False)
+    >>> metadata._replace(standard_name=None, units=None)
+    DimCoordMetadata(standard_name=None, long_name=None, var_name='longitude', units=None, attributes={'grinning face': '🙃'}, coord_system=GeogCS(6371229.0), climatological=False, circular=False)
+
+Another very useful member of the `namedtuple`_ toolkit is `namedtuple._fields`_.
+This attribute is a tuple of strings listing the ``metadata`` members, in a
+fixed order. This allows you to easily iterate over the metadata class members,
+for whatever purpose you may require, e.g.,
+
+    >>> metadata._fields
+    ('standard_name', 'long_name', 'var_name', 'units', 'attributes', 'coord_system', 'climatological', 'circular')
+
+    >>> tuple([getattr(metadata, member) for member in metadata._fields])
+    ('longitude', None, 'longitude', Unit('degrees'), {'grinning face': '🙃'}, GeogCS(6371229.0), False, False)
+
+    >>> tuple([getattr(metadata, member) for member in metadata._fields if member.endswith("name")])
+    ('longitude', None, 'longitude')
+
+Note that `namedtuple._fields`_ is also available on the metadata class itself,
+so you don't need an instance to determine the members of a metadata class, e.g.,
+
+    >>> from iris.common import CubeMetadata
+    >>> CubeMetadata._fields
+    ('standard_name', 'long_name', 'var_name', 'units', 'attributes', 'cell_methods')
+
+Aside from the benefit of metadata classes inheriting behaviour and state
+from `namedtuple`_, further rich behaviour is also available,
+which we explore next.
+
+
+..
_richer metadata:
+
+Richer Metadata Behaviour
+-------------------------
+
+.. testsetup:: richer-metadata
+
+    import iris
+    import numpy as np
+    from iris.common import CoordMetadata
+    cube = iris.load_cube(iris.sample_data_path("A1B_north_america.nc"))
+    longitude = cube.coord("longitude")
+
+The metadata classes from :ref:`metadata classes table` support additional
+behaviour above and beyond that of the standard Python `namedtuple`_, which
+allows you to easily **compare**, **combine**, **convert** and understand the
+**difference** between your ``metadata`` instances.
+
+
+.. _metadata equality:
+
+Metadata Equality
+^^^^^^^^^^^^^^^^^
+
+The metadata classes support both **equality** (``__eq__``) and **inequality**
+(``__ne__``), but no other `rich comparison`_ operators are implemented.
+This is simply because there is no obvious ordering to any collective of metadata
+members, as defined in :ref:`metadata members table`.
+
+For example, given the following :class:`~iris.coords.DimCoord`,
+
+.. doctest:: richer-metadata
+
+    >>> longitude.metadata
+    DimCoordMetadata(standard_name='longitude', long_name=None, var_name='longitude', units=Unit('degrees'), attributes={}, coord_system=GeogCS(6371229.0), climatological=False, circular=False)
+
+We can compare ``metadata`` using the ``==`` operator, as you may naturally
+expect,
+
+.. doctest:: richer-metadata
+
+    >>> longitude.metadata == longitude.metadata
+    True
+
+Or alternatively, using the ``equal`` method,
+
+.. doctest:: richer-metadata
+
+    >>> longitude.metadata.equal(longitude.metadata)
+    True
+
+Note that the ``==`` operator (``__eq__``) and the ``equal`` method are
+both functionally equivalent. However, the ``equal`` method also provides
+a means to enable **lenient** equality, as discussed in :ref:`lenient equality`.
+
+
+.. _strict equality:
+
+Strict Equality
+"""""""""""""""
+
+By default, metadata class equality will perform a **strict** comparison between
+each associated ``metadata`` member. If **any** ``metadata`` member has a
+different value, then the result of the operation will be ``False``. For example,
+
+.. doctest:: richer-metadata
+
+    >>> other = longitude.metadata._replace(standard_name=None)
+    >>> other
+    DimCoordMetadata(standard_name=None, long_name=None, var_name='longitude', units=Unit('degrees'), attributes={}, coord_system=GeogCS(6371229.0), climatological=False, circular=False)
+    >>> longitude.metadata == other
+    False
+
+.. doctest:: richer-metadata
+
+    >>> longitude.attributes = {"grinning face": "🙂"}
+    >>> other = longitude.metadata._replace(attributes={"grinning face": "🙃"})
+    >>> other
+    DimCoordMetadata(standard_name='longitude', long_name=None, var_name='longitude', units=Unit('degrees'), attributes={'grinning face': '🙃'}, coord_system=GeogCS(6371229.0), climatological=False, circular=False)
+    >>> longitude.metadata == other
+    False
+
+One further point worth highlighting is that it is possible for `NumPy`_ scalars
+and arrays to appear in the ``attributes`` `dict`_ of some Iris metadata class
+instances. Normally, this would cause issues. For example,
+
+.. doctest:: richer-metadata
+
+    >>> simply = {"one": np.int32(1), "two": np.array([1.0, 2.0])}
+    >>> simply
+    {'one': 1, 'two': array([1., 2.])}
+    >>> fruity = {"one": np.int32(1), "two": np.array([1.0, 2.0])}
+    >>> fruity
+    {'one': 1, 'two': array([1., 2.])}
+    >>> simply == fruity
+    Traceback (most recent call last):
+    ValueError: The truth value of an array with more than one element is ambiguous. 
Use a.any() or a.all()
+
+However, metadata class equality is rich enough to handle this eventuality,
+
+.. doctest:: richer-metadata
+
+    >>> metadata1 = cube.metadata._replace(attributes=simply)
+    >>> metadata2 = cube.metadata._replace(attributes=fruity)
+    >>> metadata1
+    CubeMetadata(standard_name='air_temperature', long_name=None, var_name='air_temperature', units=Unit('K'), attributes={'one': 1, 'two': array([1., 2.])}, cell_methods=(CellMethod(method='mean', coord_names=('time',), intervals=('6 hour',), comments=()),))
+    >>> metadata2
+    CubeMetadata(standard_name='air_temperature', long_name=None, var_name='air_temperature', units=Unit('K'), attributes={'one': 1, 'two': array([1., 2.])}, cell_methods=(CellMethod(method='mean', coord_names=('time',), intervals=('6 hour',), comments=()),))
+
+.. doctest:: richer-metadata
+
+    >>> metadata1 == metadata2
+    True
+
+.. doctest:: richer-metadata
+
+    >>> metadata1
+    CubeMetadata(standard_name='air_temperature', long_name=None, var_name='air_temperature', units=Unit('K'), attributes={'one': 1, 'two': array([1., 2.])}, cell_methods=(CellMethod(method='mean', coord_names=('time',), intervals=('6 hour',), comments=()),))
+    >>> metadata2 = cube.metadata._replace(attributes={"one": np.int32(1), "two": np.array([1000.0, 2000.0])})
+    >>> metadata2
+    CubeMetadata(standard_name='air_temperature', long_name=None, var_name='air_temperature', units=Unit('K'), attributes={'one': 1, 'two': array([1000., 2000.])}, cell_methods=(CellMethod(method='mean', coord_names=('time',), intervals=('6 hour',), comments=()),))
+    >>> metadata1 == metadata2
+    False
+
+
+.. _compare like:
+
+Comparing Like With Like
+""""""""""""""""""""""""
+
+So far in our journey through metadata class equality, we have only considered
+cases where the operands are instances of the **same** type. It is possible to
+compare instances of **different** metadata classes, but the result will always
+be ``False``,
+
+.. doctest:: richer-metadata
+
+    >>> cube.metadata == longitude.metadata
+    False
+
+The reason different metadata classes cannot be compared is simply because each
+metadata class contains **different** members, as shown in
+:ref:`metadata members table`. However, there is an exception to the rule...
+
+
+.. _exception rule:
+
+Exception to the Rule
+~~~~~~~~~~~~~~~~~~~~~
+
+In general, **different** metadata classes cannot be compared; however, support
+is provided for comparing :class:`~iris.common.metadata.CoordMetadata` and
+:class:`~iris.common.metadata.DimCoordMetadata` metadata classes. For example,
+consider the following :class:`~iris.common.metadata.DimCoordMetadata`,
+
+.. doctest:: richer-metadata
+
+    >>> latitude = cube.coord("latitude")
+    >>> latitude.metadata
+    DimCoordMetadata(standard_name='latitude', long_name=None, var_name='latitude', units=Unit('degrees'), attributes={}, coord_system=GeogCS(6371229.0), climatological=False, circular=False)
+
+Next we create a new :class:`~iris.common.metadata.CoordMetadata` instance from
+the :class:`~iris.common.metadata.DimCoordMetadata` instance,
+
+.. doctest:: richer-metadata
+
+    >>> kwargs = latitude.metadata._asdict()
+    >>> del kwargs["circular"]
+    >>> metadata = CoordMetadata(**kwargs)
+    >>> metadata
+    CoordMetadata(standard_name='latitude', long_name=None, var_name='latitude', units=Unit('degrees'), attributes={}, coord_system=GeogCS(6371229.0), climatological=False)
+
+.. hint::
+
+    Alternatively, use the ``from_metadata`` class method instead, see
+    :ref:`metadata conversion`. 
+
+Comparing the instances confirms that equality is indeed supported between
+:class:`~iris.common.metadata.DimCoordMetadata` and :class:`~iris.common.metadata.CoordMetadata`
+classes,
+
+.. doctest:: richer-metadata
+
+    >>> latitude.metadata == metadata
+    True
+
+The reason for this behaviour is primarily historical. The ``circular``
+member has **never** been used by the ``__eq__`` operator when comparing an
+:class:`~iris.coords.AuxCoord` and a :class:`~iris.coords.DimCoord`. Therefore,
+for consistency, this behaviour is also extended to ``__eq__`` for the associated
+container metadata classes.
+
+However, note that the ``circular`` member **is used** by the ``__eq__`` operator
+when comparing one :class:`~iris.coords.DimCoord` to another. This also applies
+when comparing :class:`~iris.common.metadata.DimCoordMetadata`.
+
+This exception to the rule for :ref:`equality <metadata equality>` also applies
+to the :ref:`difference <metadata difference>` and :ref:`combine <metadata combine>`
+methods of metadata classes.
+
+
+.. _metadata difference:
+
+Metadata Difference
+^^^^^^^^^^^^^^^^^^^
+
+Being able to compare metadata is valuable, especially when we have the
+convenience of being able to do this easily with metadata classes. However,
+when the result of comparing two metadata instances is ``False``, it begs
+the question, "**what's the difference?**"
+
+Well, this is where we pull the ``difference`` method out of the metadata
+toolbox. First, let's create some ``metadata`` to compare,
+
+.. doctest:: richer-metadata
+
+    >>> longitude = cube.coord("longitude")
+    >>> longitude.metadata
+    DimCoordMetadata(standard_name='longitude', long_name=None, var_name='longitude', units=Unit('degrees'), attributes={'grinning face': '🙂'}, coord_system=GeogCS(6371229.0), climatological=False, circular=False)
+
+Now, we replace some members of the :class:`~iris.common.metadata.DimCoordMetadata` with
+different values,
+
+.. doctest:: richer-metadata
+
+    >>> from cf_units import Unit
+    >>> metadata = longitude.metadata._replace(long_name="lon", var_name="lon", units=Unit("radians"))
+    >>> metadata
+    DimCoordMetadata(standard_name='longitude', long_name='lon', var_name='lon', units=Unit('radians'), attributes={'grinning face': '🙂'}, coord_system=GeogCS(6371229.0), climatological=False, circular=False)
+
+First, confirm that the ``metadata`` is different,
+
+.. doctest:: richer-metadata
+
+    >>> longitude.metadata != metadata
+    True
+
+As expected, the ``metadata`` is different. Now, let's answer the question,
+"**what's the difference?**",
+
+.. doctest:: richer-metadata
+
+    >>> longitude.metadata.difference(metadata)
+    DimCoordMetadata(standard_name=None, long_name=(None, 'lon'), var_name=('longitude', 'lon'), units=(Unit('degrees'), Unit('radians')), attributes=None, coord_system=None, climatological=None, circular=None)
+
+The ``difference`` method returns a :class:`~iris.common.metadata.DimCoordMetadata` instance when
+there is **at least** one ``metadata`` member with a different value, where,
+
+- ``None`` means that there was **no** difference for the member,
+- a `tuple`_ contains the two different associated values for the member.
+
+Given our example, only the ``long_name``, ``var_name`` and ``units`` members
+have different values, as expected. Note that the ``difference`` method **is
+not** commutative. The order of the values within a member's tuple matches the
+order of the metadata class instances being compared, e.g., changing the
+``difference`` instance order is reflected in the result,
+
+..
doctest:: richer-metadata
+
+    >>> metadata.difference(longitude.metadata)
+    DimCoordMetadata(standard_name=None, long_name=('lon', None), var_name=('lon', 'longitude'), units=(Unit('radians'), Unit('degrees')), attributes=None, coord_system=None, climatological=None, circular=None)
+
+Also, when the ``metadata`` being compared **is identical**, then ``None``
+is simply returned,
+
+.. doctest:: richer-metadata
+
+    >>> metadata.difference(metadata) is None
+    True
+
+It's worth highlighting that for the ``attributes`` `dict`_ member, only
+those keys with **different values**, or keys that are **missing** from one of
+the instances, will be returned by the ``difference`` method. For example,
+let's customise the ``attributes`` member of
+the following :class:`~iris.common.metadata.DimCoordMetadata`,
+
+.. doctest:: richer-metadata
+
+    >>> attributes = {"grinning face": "😀", "neutral face": "😐"}
+    >>> longitude.attributes = attributes
+    >>> longitude.metadata
+    DimCoordMetadata(standard_name='longitude', long_name=None, var_name='longitude', units=Unit('degrees'), attributes={'grinning face': '😀', 'neutral face': '😐'}, coord_system=GeogCS(6371229.0), climatological=False, circular=False)
+
+Then create another :class:`~iris.common.metadata.DimCoordMetadata` with a different
+``attributes`` `dict`_, namely,
+
+- the ``grinning face`` key has the **same value**,
+- the ``neutral face`` key has a **different value**,
+- the ``upside-down face`` key is **new**
+
+.. doctest:: richer-metadata
+
+    >>> attributes = {"grinning face": "😀", "neutral face": "😜", "upside-down face": "🙃"}
+    >>> metadata = longitude.metadata._replace(attributes=attributes)
+    >>> metadata
+    DimCoordMetadata(standard_name='longitude', long_name=None, var_name='longitude', units=Unit('degrees'), attributes={'grinning face': '😀', 'neutral face': '😜', 'upside-down face': '🙃'}, coord_system=GeogCS(6371229.0), climatological=False, circular=False)
+
+Now, let's compare the two instances above and see what ``attributes`` member differences we get,
+
+.. doctest:: richer-metadata
+
+    >>> longitude.metadata.difference(metadata)  # doctest: +SKIP
+    DimCoordMetadata(standard_name=None, long_name=None, var_name=None, units=None, attributes=({'neutral face': '😐'}, {'neutral face': '😜', 'upside-down face': '🙃'}), coord_system=None, climatological=None, circular=None)
+
+
+.. _diff like:
+
+Diffing Like With Like
+""""""""""""""""""""""
+
+As discussed in :ref:`compare like`, it only makes sense to determine the
+``difference`` between **similar** metadata class instances. However, note that
+the :ref:`exception to the rule <exception rule>` still applies here i.e.,
+support is provided between :class:`~iris.common.metadata.CoordMetadata` and
+:class:`~iris.common.metadata.DimCoordMetadata` metadata classes.
+
+For example, given the following :class:`~iris.coords.AuxCoord` and
+:class:`~iris.coords.DimCoord`,
+
+.. doctest:: richer-metadata
+
+    >>> forecast_period = cube.coord("forecast_period")
+    >>> latitude = cube.coord("latitude")
+
+We can inspect their associated ``metadata``,
+
+..
doctest:: richer-metadata
+
+    >>> forecast_period.metadata
+    CoordMetadata(standard_name='forecast_period', long_name=None, var_name='forecast_period', units=Unit('hours'), attributes={}, coord_system=None, climatological=False)
+    >>> latitude.metadata
+    DimCoordMetadata(standard_name='latitude', long_name=None, var_name='latitude', units=Unit('degrees'), attributes={}, coord_system=GeogCS(6371229.0), climatological=False, circular=False)
+
+Before comparing them to determine the values of metadata members that are different,
+
+.. doctest:: richer-metadata
+
+    >>> forecast_period.metadata.difference(latitude.metadata)
+    CoordMetadata(standard_name=('forecast_period', 'latitude'), long_name=None, var_name=('forecast_period', 'latitude'), units=(Unit('hours'), Unit('degrees')), attributes=None, coord_system=(None, GeogCS(6371229.0)), climatological=None)
+
+.. doctest:: richer-metadata
+
+    >>> latitude.metadata.difference(forecast_period.metadata)
+    DimCoordMetadata(standard_name=('latitude', 'forecast_period'), long_name=None, var_name=('latitude', 'forecast_period'), units=(Unit('degrees'), Unit('hours')), attributes=None, coord_system=(GeogCS(6371229.0), None), climatological=None, circular=(False, None))
+
+In general, however, comparing **different** metadata classes will result in a
+``TypeError`` being raised,
+
+.. doctest:: richer-metadata
+
+    >>> cube.metadata.difference(longitude.metadata)
+    Traceback (most recent call last):
+    TypeError: Cannot differ 'CubeMetadata' with <class 'iris.common.metadata.DimCoordMetadata'>.
+
+
+.. _metadata combine:
+
+Metadata Combination
+^^^^^^^^^^^^^^^^^^^^
+
+.. testsetup:: metadata-combine
+
+    import iris
+    cube = iris.load_cube(iris.sample_data_path("A1B_north_america.nc"))
+    longitude = cube.coord("longitude")
+
+So far we've seen how to :ref:`compare metadata <metadata equality>`, and also how
+to determine the :ref:`difference between metadata <metadata difference>`. Now we
+take the next step, and explore how to combine metadata using the ``combine``
+metadata class method.
+
+For example, consider the following :class:`~iris.common.metadata.CubeMetadata`,
+
+.. doctest:: metadata-combine
+
+    >>> cube.metadata
+    CubeMetadata(standard_name='air_temperature', long_name=None, var_name='air_temperature', units=Unit('K'), attributes=CubeAttrsDict(globals={'Conventions': 'CF-1.5'}, locals={'STASH': STASH(model=1, section=3, item=236), 'Model scenario': 'A1B', 'source': 'Data from Met Office Unified Model 6.05'}), cell_methods=(CellMethod(method='mean', coord_names=('time',), intervals=('6 hour',), comments=()),))
+
+We can perform the **identity function** by combining the metadata with itself,
+
+.. doctest:: metadata-combine
+
+    >>> metadata = cube.metadata.combine(cube.metadata)
+    >>> cube.metadata == metadata
+    True
+
+As you might expect, combining identical metadata returns metadata that is
+also identical.
+
+The ``combine`` method will always return **a new** metadata class instance,
+where each metadata member is either ``None`` or populated with a **common value**.
+Let's clarify this by combining our above :class:`~iris.common.metadata.CubeMetadata`
+with another instance that's identical apart from its ``standard_name`` member,
+which is replaced with a **different value**,
+
+..
doctest:: metadata-combine
+
+    >>> metadata = cube.metadata._replace(standard_name="air_pressure_at_sea_level")
+    >>> metadata != cube.metadata
+    True
+    >>> metadata.combine(cube.metadata)  # doctest: +SKIP
+    CubeMetadata(standard_name=None, long_name=None, var_name='air_temperature', units=Unit('K'), attributes={'STASH': STASH(model=1, section=3, item=236), 'Model scenario': 'A1B', 'source': 'Data from Met Office Unified Model 6.05', 'Conventions': 'CF-1.5'}, cell_methods=(CellMethod(method='mean', coord_names=('time',), intervals=('6 hour',), comments=()),))
+
+The ``combine`` method combines metadata by performing a **strict** comparison
+between each of the associated metadata member values,
+
+- if the values are **different**, then the combined result is ``None``
+- otherwise, the combined result is the **common value**
+
+Let's reinforce this behaviour by combining metadata where the
+``attributes`` `dict`_ member is different, namely,
+
+- the ``STASH`` and ``source`` keys are **missing**,
+- the ``Model scenario`` key has the **same value**,
+- the ``Conventions`` key has a **different value**,
+- the ``grinning face`` key is **new**
+
+.. doctest:: metadata-combine
+
+    >>> attributes = {"Model scenario": "A1B", "Conventions": "CF-1.8", "grinning face": "🙂"}
+    >>> metadata = cube.metadata._replace(attributes=attributes)
+    >>> metadata != cube.metadata
+    True
+    >>> metadata.combine(cube.metadata).attributes
+    CubeAttrsDict(globals={}, locals={'Model scenario': 'A1B'})
+
+The combined result for the ``attributes`` member only contains those
+**common keys** with **common values**.
+
+Note that the ``combine`` method is **commutative**,
+
+.. doctest:: metadata-combine
+
+    >>> cube.metadata.combine(metadata) == metadata.combine(cube.metadata)
+    True
+
+However, this is only the case when combining instances of the **same**
+metadata class. This is explored in a little further detail next.
+
+
+.. _combine like:
+
+Combine Like With Like
+""""""""""""""""""""""
+
+Akin to the :ref:`equal <metadata equality>` and
+:ref:`difference <metadata difference>` methods, only instances of **similar**
+metadata classes can be combined, otherwise a ``TypeError`` is raised,
+
+.. doctest:: metadata-combine
+
+    >>> cube.metadata.combine(longitude.metadata)
+    Traceback (most recent call last):
+    TypeError: Cannot combine 'CubeMetadata' with <class 'iris.common.metadata.DimCoordMetadata'>.
+
+Again, however, the :ref:`exception to the rule <exception rule>` also applies
+here i.e., support is provided between :class:`~iris.common.metadata.CoordMetadata` and
+:class:`~iris.common.metadata.DimCoordMetadata` metadata classes.
+
+For example, we can ``combine`` the metadata of the following
+:class:`~iris.coords.AuxCoord` and :class:`~iris.coords.DimCoord`,
+
+.. doctest:: metadata-combine
+
+    >>> forecast_period = cube.coord("forecast_period")
+    >>> longitude = cube.coord("longitude")
+
+First, let's see their associated metadata,
+
+.. doctest:: metadata-combine
+
+    >>> forecast_period.metadata
+    CoordMetadata(standard_name='forecast_period', long_name=None, var_name='forecast_period', units=Unit('hours'), attributes={}, coord_system=None, climatological=False)
+    >>> longitude.metadata
+    DimCoordMetadata(standard_name='longitude', long_name=None, var_name='longitude', units=Unit('degrees'), attributes={}, coord_system=GeogCS(6371229.0), climatological=False, circular=False)
+
+Before combining their metadata together,
+
+..
doctest:: metadata-combine
+
+    >>> forecast_period.metadata.combine(longitude.metadata)
+    CoordMetadata(standard_name=None, long_name=None, var_name=None, units=None, attributes={}, coord_system=None, climatological=False)
+    >>> longitude.metadata.combine(forecast_period.metadata)
+    DimCoordMetadata(standard_name=None, long_name=None, var_name=None, units=None, attributes={}, coord_system=None, climatological=False, circular=None)
+
+However, note that commutativity in this case cannot be honoured: the class of
+the combined result depends on the class of the instance whose ``combine``
+method is called, as shown above.
+
+
+.. _metadata conversion:
+
+Metadata Conversion
+^^^^^^^^^^^^^^^^^^^
+
+.. testsetup:: metadata-convert
+
+    import iris
+    from iris.common import DimCoordMetadata
+    cube = iris.load_cube(iris.sample_data_path("A1B_north_america.nc"))
+    longitude = cube.coord("longitude")
+
+In general, the :ref:`equal <metadata equality>`, :ref:`difference <metadata difference>`,
+and :ref:`combine <metadata combine>` methods only support operations on instances
+of the same metadata class (see :ref:`exception to the rule <exception rule>`).
+
+However, metadata may be converted from one metadata class to another using
+the ``from_metadata`` class method. For example, given the following
+:class:`~iris.common.metadata.CubeMetadata`,
+
+.. doctest:: metadata-convert
+
+    >>> cube.metadata
+    CubeMetadata(standard_name='air_temperature', long_name=None, var_name='air_temperature', units=Unit('K'), attributes=CubeAttrsDict(globals={'Conventions': 'CF-1.5'}, locals={'STASH': STASH(model=1, section=3, item=236), 'Model scenario': 'A1B', 'source': 'Data from Met Office Unified Model 6.05'}), cell_methods=(CellMethod(method='mean', coord_names=('time',), intervals=('6 hour',), comments=()),))
+
+We can easily convert it to a :class:`~iris.common.metadata.DimCoordMetadata` instance
+using ``from_metadata``,
+
+.. doctest:: metadata-convert
+
+    >>> newmeta = DimCoordMetadata.from_metadata(cube.metadata)
+    >>> print(newmeta)
+    DimCoordMetadata(standard_name=air_temperature, var_name=air_temperature, units=K, attributes={'Conventions': 'CF-1.5', 'STASH': STASH(model=1, section=3, item=236), 'Model scenario': 'A1B', 'source': 'Data from Met Office Unified Model 6.05'})
+
+By examining :ref:`metadata members table`, we can see that the
+:class:`~iris.cube.Cube` and :class:`~iris.coords.DimCoord` container
+classes share the following common metadata members,
+
+- ``standard_name``,
+- ``long_name``,
+- ``var_name``,
+- ``units``,
+- ``attributes``
+
+As such, all of these metadata members of the resultant
+:class:`~iris.common.metadata.DimCoordMetadata` instance are populated from the associated
+:class:`~iris.common.metadata.CubeMetadata` instance members. However, a
+:class:`~iris.common.metadata.CubeMetadata` class does not contain the following
+:class:`~iris.common.metadata.DimCoordMetadata` members,
+
+- ``coord_system``,
+- ``climatological``,
+- ``circular``
+
+Thus, these particular metadata members are set to ``None`` in the resultant
+:class:`~iris.common.metadata.DimCoordMetadata` instance.
+
+Note that the ``from_metadata`` method is also available on a metadata
+class instance,
+
+.. doctest:: metadata-convert
+
+    >>> newmeta = longitude.metadata.from_metadata(cube.metadata)
+    >>> print(newmeta)
+    DimCoordMetadata(standard_name=air_temperature, var_name=air_temperature, units=K, attributes={'Conventions': 'CF-1.5', 'STASH': STASH(model=1, section=3, item=236), 'Model scenario': 'A1B', 'source': 'Data from Met Office Unified Model 6.05'})
+
+.. _metadata assignment:
+
+Metadata Assignment
+^^^^^^^^^^^^^^^^^^^
+
+.. 
testsetup:: metadata-assign + + import iris + cube = iris.load_cube(iris.sample_data_path("A1B_north_america.nc")) + longitude = cube.coord("longitude") + original = longitude.copy() + latitude = cube.coord("latitude") + +The ``metadata`` property available on each Iris `CF Conventions`_ container +class (:ref:`metadata classes table`) can not only be used **to get** +the metadata of an instance, but also **to set** the metadata on an instance. + +For example, given the following :class:`~iris.common.metadata.DimCoordMetadata` of the +``longitude`` coordinate, + +.. doctest:: metadata-assign + + >>> longitude.metadata + DimCoordMetadata(standard_name='longitude', long_name=None, var_name='longitude', units=Unit('degrees'), attributes={}, coord_system=GeogCS(6371229.0), climatological=False, circular=False) + +We can assign to it directly using the :class:`~iris.common.metadata.DimCoordMetadata` of the ``latitude`` +coordinate, + +.. doctest:: metadata-assign + + >>> latitude.metadata + DimCoordMetadata(standard_name='latitude', long_name=None, var_name='latitude', units=Unit('degrees'), attributes={}, coord_system=GeogCS(6371229.0), climatological=False, circular=False) + >>> longitude.metadata = latitude.metadata + >>> longitude.metadata + DimCoordMetadata(standard_name='latitude', long_name=None, var_name='latitude', units=Unit('degrees'), attributes={}, coord_system=GeogCS(6371229.0), climatological=False, circular=False) + + +Assign by Iterable +"""""""""""""""""" + +It is also possible to assign to the ``metadata`` property of an Iris +`CF Conventions`_ container with an iterable containing the **correct +number** of associated member values, e.g., + +.. doctest:: metadata-assign + + >>> values = [getattr(latitude, member) for member in latitude.metadata._fields] + >>> longitude.metadata = values + >>> longitude.metadata + DimCoordMetadata(standard_name='latitude', long_name=None, var_name='latitude', units=Unit('degrees'), attributes={}, coord_system=GeogCS(6371229.0), climatological=False, circular=False) + + +Assign by Namedtuple +"""""""""""""""""""" + +A `namedtuple`_ may also be used to assign to the ``metadata`` property of an +Iris `CF Conventions`_ container. For example, let's first create a custom +namedtuple class, + +.. doctest:: metadata-assign + + >>> from collections import namedtuple + >>> Metadata = namedtuple("Metadata", ["standard_name", "long_name", "var_name", "units", "attributes", "coord_system", "climatological", "circular"]) + +Now create an instance of this custom namedtuple class, and populate it, + +.. doctest:: metadata-assign + + >>> metadata = Metadata(*values) + >>> metadata + Metadata(standard_name='latitude', long_name=None, var_name='latitude', units=Unit('degrees'), attributes={}, coord_system=GeogCS(6371229.0), climatological=False, circular=False) + +Now we can use the custom namedtuple instance to assign directly to the metadata +of the ``longitude`` coordinate, + +.. doctest:: metadata-assign + + >>> longitude.metadata = metadata + >>> longitude.metadata + DimCoordMetadata(standard_name='latitude', long_name=None, var_name='latitude', units=Unit('degrees'), attributes={}, coord_system=GeogCS(6371229.0), climatological=False, circular=False) + + +Assign by Mapping +""""""""""""""""" + +It is also possible to assign to the ``metadata`` property using a `mapping`_, +such as a `dict`_, + +.. 
doctest:: metadata-assign
+
+    >>> mapping = latitude.metadata._asdict()
+    >>> mapping
+    {'standard_name': 'latitude', 'long_name': None, 'var_name': 'latitude', 'units': Unit('degrees'), 'attributes': {}, 'coord_system': GeogCS(6371229.0), 'climatological': False, 'circular': False}
+    >>> longitude.metadata = mapping
+    >>> longitude.metadata
+    DimCoordMetadata(standard_name='latitude', long_name=None, var_name='latitude', units=Unit('degrees'), attributes={}, coord_system=GeogCS(6371229.0), climatological=False, circular=False)
+
+Support is also provided for assigning a **partial** mapping, for example,
+
+.. testcode:: metadata-assign
+    :hide:
+
+    longitude = original
+
+.. doctest:: metadata-assign
+
+    >>> longitude.metadata
+    DimCoordMetadata(standard_name='longitude', long_name=None, var_name='longitude', units=Unit('degrees'), attributes={}, coord_system=GeogCS(6371229.0), climatological=False, circular=False)
+    >>> longitude.metadata = dict(var_name="lat", units="radians", circular=True)
+    >>> longitude.metadata
+    DimCoordMetadata(standard_name='longitude', long_name=None, var_name='lat', units=Unit('radians'), attributes={}, coord_system=GeogCS(6371229.0), climatological=False, circular=True)
+
+Indeed, it's also possible to assign to the ``metadata`` property with a
+**different** metadata class instance,
+
+.. testcode:: metadata-assign
+    :hide:
+
+    longitude.metadata = dict(var_name="longitude", units="degrees", circular=False)
+
+.. doctest:: metadata-assign
+
+    >>> longitude.metadata
+    DimCoordMetadata(standard_name='longitude', long_name=None, var_name='longitude', units=Unit('degrees'), attributes={}, coord_system=GeogCS(6371229.0), climatological=False, circular=False)
+    >>> longitude.metadata = cube.metadata
+    >>> longitude.metadata
+    DimCoordMetadata(standard_name='air_temperature', long_name=None, var_name='air_temperature', units=Unit('K'), attributes={'Conventions': 'CF-1.5', 'STASH': STASH(model=1, section=3, item=236), 'Model scenario': 'A1B', 'source': 'Data from Met Office Unified Model 6.05'}, coord_system=GeogCS(6371229.0), climatological=False, circular=False)
+
+Note that only **common** metadata members will be assigned new associated
+values. All other metadata members will be left unaltered.
+
+
+.. _data about data: https://en.wikipedia.org/wiki/Metadata
+.. _data attribute: https://docs.python.org/3/tutorial/classes.html#instance-objects
+.. _dict: https://docs.python.org/3/library/stdtypes.html#mapping-types-dict
+.. _Ancillary Data: https://cfconventions.org/Data/cf-conventions/cf-conventions-1.8/cf-conventions.html#ancillary-data
+.. _CF Conventions: https://cfconventions.org/
+.. _Cell Measures: https://cfconventions.org/Data/cf-conventions/cf-conventions-1.8/cf-conventions.html#cell-measures
+.. _Flags: https://cfconventions.org/Data/cf-conventions/cf-conventions-1.8/cf-conventions.html#flags
+.. _mapping: https://docs.python.org/3/glossary.html#term-mapping
+.. _namedtuple: https://docs.python.org/3/library/collections.html#collections.namedtuple
+.. _namedtuple._make: https://docs.python.org/3/library/collections.html#collections.somenamedtuple._make
+.. _namedtuple._asdict: https://docs.python.org/3/library/collections.html#collections.somenamedtuple._asdict
+.. _namedtuple._replace: https://docs.python.org/3/library/collections.html#collections.somenamedtuple._replace
+.. _namedtuple._fields: https://docs.python.org/3/library/collections.html#collections.somenamedtuple._fields
+.. _NetCDF: https://www.unidata.ucar.edu/software/netcdf/
+..
_NetCDF CF Metadata Conventions: https://cfconventions.org/
+.. _NumPy: https://github.com/numpy/numpy
+.. _Parametric Vertical Coordinate: https://cfconventions.org/Data/cf-conventions/cf-conventions-1.8/cf-conventions.html#parametric-vertical-coordinate
+.. _rich comparison: https://www.python.org/dev/peps/pep-0207/
+.. _SciTools/iris: https://github.com/SciTools/iris
+.. _tuple: https://docs.python.org/3/library/stdtypes.html#tuples
diff --git a/docs/iris/src/whitepapers/missing_data_handling.rst b/docs/src/further_topics/missing_data_handling.rst
similarity index 82%
rename from docs/iris/src/whitepapers/missing_data_handling.rst
rename to docs/src/further_topics/missing_data_handling.rst
index cd6ef038c2..a461a44456 100644
--- a/docs/iris/src/whitepapers/missing_data_handling.rst
+++ b/docs/src/further_topics/missing_data_handling.rst
@@ -22,6 +22,7 @@ On load, any fill-value or missing data value defined in the loaded dataset
 should be used as the ``fill_value`` of the NumPy masked array data attribute
 of the :class:`~iris.cube.Cube`.
 This will only appear when the cube's data is realised.
+.. _missing_data_saving:
 
 Saving
 ------
@@ -37,7 +38,8 @@ For example::
 
 .. note::
     Not all savers accept the ``fill_value`` keyword argument.
 
-Iris will check for and issue warnings of fill-value 'collisions'.
+Iris will check for and issue warnings of fill-value 'collisions' (exception:
+**NetCDF**, see the heading below).
 This basically means that whenever there are unmasked values that would read back
 as masked, we issue a warning and suggest a workaround.
 
@@ -51,6 +53,8 @@ This will occur in the following cases:
 
 NetCDF
 ~~~~~~
 
+:term:`NetCDF Format`
+
 NetCDF is a special case, because all ordinary variable data is "potentially
 masked", owing to the use of default fill values.
 The default fill-value used depends on the type of the variable data.
 
@@ -64,6 +68,16 @@ The exceptions to this are:
 * Small integers create problems by *not* having the exemption applied to byte
   data.  Thus, in principle, ``int32`` data cannot use the full range of 2**16
   valid values.
 
+Warnings are not issued for NetCDF fill value collisions. Increasingly large
+and complex parallel I/O operations unfortunately made this feature
+unmaintainable, and it was retired in Iris 3.9 (:pull:`5833`).
+
+If you need to know about collisions, you can perform your own checks ahead
+of saving. Such operations can be run lazily (:term:`Lazy Data`). Here is an
+example::
+
+    >>> import netCDF4
+    >>> default_fill = netCDF4.default_fillvals[my_cube.dtype.str[1:]]
+    >>> fill_present = (my_cube.lazy_data() == default_fill).any().compute()
 
 Merging
 -------
 
@@ -73,7 +87,7 @@ all have the same fill-value.
 If the components have differing fill-values, a default fill-value will be
 used instead.
 
-Other operations
+Other Operations
 ----------------
 
 Other operations, such as :class:`~iris.cube.Cube` arithmetic operations,
diff --git a/docs/src/further_topics/netcdf_io.rst b/docs/src/further_topics/netcdf_io.rst
new file mode 100644
index 0000000000..4e1c32b22f
--- /dev/null
+++ b/docs/src/further_topics/netcdf_io.rst
@@ -0,0 +1,182 @@
+..
testsetup:: chunk_control
+
+    import iris
+    from iris.fileformats.netcdf.loader import CHUNK_CONTROL
+
+    from pathlib import Path
+    import dask
+    import shutil
+    import tempfile
+
+    tmp_dir = Path(tempfile.mkdtemp())
+    tmp_filepath = tmp_dir / "tmp.nc"
+
+    cube = iris.load(iris.sample_data_path("E1_north_america.nc"))[0]
+    iris.save(cube, tmp_filepath, chunksizes=(120, 37, 49))
+    old_dask = dask.config.get("array.chunk-size")
+    dask.config.set({'array.chunk-size': '500KiB'})
+
+
+.. testcleanup:: chunk_control
+
+    dask.config.set({'array.chunk-size': old_dask})
+    shutil.rmtree(tmp_dir)
+
+.. _netcdf_io:
+
+=============================
+NetCDF I/O Handling in Iris
+=============================
+
+This document provides a basic account of how Iris loads and saves NetCDF files.
+
+.. admonition:: Under Construction
+
+    This document is still a work in progress, so might include blank or
+    unfinished sections - watch this space!
+
+
+Chunk Control
+--------------
+
+Default Chunking
+^^^^^^^^^^^^^^^^
+
+Chunks are, by default, optimised by Iris on load. This will automatically
+decide the best chunksize for your data without any user input. This is
+calculated based on a number of factors, including:
+
+- File Variable Chunking
+- Full Variable Shape
+- Dask Default Chunksize
+- Dimension Order: Earlier (outer) dimensions will be prioritised to be split over later (inner) dimensions.
+
+.. doctest:: chunk_control
+
+    >>> cube = iris.load_cube(tmp_filepath)
+    >>>
+    >>> print(cube.shape)
+    (240, 37, 49)
+    >>> print(cube.core_data().chunksize)
+    (60, 37, 49)
+
+For more user control, functionality was updated in :pull:`5588`, with the
+creation of :data:`iris.fileformats.netcdf.loader.CHUNK_CONTROL`, an instance
+of the :class:`~iris.fileformats.netcdf.loader.ChunkControl` class.
+
+Custom Chunking: Set
+^^^^^^^^^^^^^^^^^^^^
+
+There are three context managers within :data:`~iris.fileformats.netcdf.loader.CHUNK_CONTROL`. The most basic is
+:meth:`~iris.fileformats.netcdf.loader.ChunkControl.set`. This allows you to specify the chunksize for each dimension,
+and, optionally, a ``var_name`` to restrict the change to a specific variable.
+
+Using ``-1`` in place of a chunksize will ensure the chunksize stays the same
+as the shape, i.e. no optimisation occurs on that dimension.
+
+.. doctest:: chunk_control
+
+    >>> with CHUNK_CONTROL.set("air_temperature", time=180, latitude=-1, longitude=25):
+    ...     cube = iris.load_cube(tmp_filepath)
+    >>>
+    >>> print(cube.core_data().chunksize)
+    (180, 37, 25)
+
+Note that ``var_name`` is optional, and that you don't need to specify every dimension. If you
+specify only one dimension, the rest will be optimised using Iris' default behaviour.
+
+.. doctest:: chunk_control
+
+    >>> with CHUNK_CONTROL.set(longitude=25):
+    ...     cube = iris.load_cube(tmp_filepath)
+    >>>
+    >>> print(cube.core_data().chunksize)
+    (120, 37, 25)
+
+Custom Chunking: From File
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The second context manager is :meth:`~iris.fileformats.netcdf.loader.ChunkControl.from_file`.
+This takes chunksizes as defined in the NetCDF file. Any dimensions without specified chunks
+will default to Iris optimisation.
+
+.. doctest:: chunk_control
+
+    >>> with CHUNK_CONTROL.from_file():
+    ...     cube = iris.load_cube(tmp_filepath)
+    >>>
+    >>> print(cube.core_data().chunksize)
+    (120, 37, 49)
+
+Custom Chunking: As Dask
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+The final context manager, :meth:`~iris.fileformats.netcdf.loader.ChunkControl.as_dask`, bypasses
+Iris' optimisation altogether, and will take its chunksizes from Dask's behaviour.
+
+..
doctest:: chunk_control
+
+    >>> with CHUNK_CONTROL.as_dask():
+    ...     cube = iris.load_cube(tmp_filepath)
+    >>>
+    >>> print(cube.core_data().chunksize)
+    (70, 37, 49)
+
+
+Split Attributes
+-----------------
+
+TBC
+
+
+Deferred Saving
+----------------
+
+TBC
+
+
+Guessing Coordinate Axes
+------------------------
+
+Iris will attempt to add an ``axis`` attribute when saving any coordinate
+variable in a NetCDF file. E.g.:
+
+::
+
+    float longitude(longitude) ;
+        longitude:axis = "X" ;
+
+This is achieved by calling :func:`iris.util.guess_coord_axis` on each
+coordinate being saved.
+
+Disabling Axis-Guessing
+^^^^^^^^^^^^^^^^^^^^^^^
+
+For some coordinates, :func:`~iris.util.guess_coord_axis` will derive an
+axis that is not appropriate. If you have such a coordinate, you can disable
+axis-guessing by setting the coordinate's
+:attr:`~iris.coords.Coord.ignore_axis` property to ``True``.
+
+One example (from https://github.com/SciTools/iris/issues/5003) is a
+coordinate describing pressure thresholds, measured in hectopascals.
+Iris interprets pressure units as indicating a Z-dimension coordinate, since
+pressure is most commonly used to describe altitude/depth. But a
+**pressure threshold** coordinate is instead describing alternate
+**scenarios** - not a spatial dimension at all - and it is therefore
+inappropriate to assign an axis to it.
+
+Worked example:
+
+.. doctest::
+
+    >>> from iris.coords import DimCoord
+    >>> from iris.util import guess_coord_axis
+    >>> my_coord = DimCoord(
+    ...    points=[1000, 1010, 1020],
+    ...    long_name="pressure_threshold",
+    ...    units="hPa",
+    ... )
+    >>> print(guess_coord_axis(my_coord))
+    Z
+    >>> my_coord.ignore_axis = True
+    >>> print(guess_coord_axis(my_coord))
+    None
diff --git a/docs/src/further_topics/ugrid/data_model.rst b/docs/src/further_topics/ugrid/data_model.rst
new file mode 100644
index 0000000000..1660f6d08c
--- /dev/null
+++ b/docs/src/further_topics/ugrid/data_model.rst
@@ -0,0 +1,569 @@
+.. include:: ../../common_links.inc
+
+.. _ugrid model:
+
+The Mesh Data Model
+*******************
+
+.. important::
+
+        This page is intended to summarise the essentials that Iris users need
+        to know about meshes. For exhaustive details on UGRID itself:
+        `visit the official UGRID conventions site`__.
+
+Evolution, not revolution
+=========================
+Mesh support has been designed wherever possible to fit within the existing
+Iris model. Meshes concern only the spatial geography of data, and can
+optionally be limited to just the horizontal geography (e.g. X and Y). Other
+dimensions such as time or ensemble member (and often vertical levels)
+retain their familiar structured format.
+
+The UGRID conventions themselves are designed as an addition to the existing CF
+conventions, which are at the core of Iris' philosophy.
+
+What's Different?
+=================
+
+The mesh format represents data's geography using an **unstructured
+mesh**. This has significant pros and cons when compared to a structured grid.
+
+.. contents::
+   :local:
+
+The Detail
+----------
+..
+    The diagram images are SVGs, so editable by any graphical software
+    (e.g. Inkscape). They were originally made in MS PowerPoint.
+
+    Uses the IBM Colour Blind Palette (see
+    https://ibm-design-language.eu-de.mybluemix.net/design/language/resources/color-library
+    )
+
+Structured Grids (the old world)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Assigning data to locations using a structured grid is essentially an act of
+matching coordinate arrays to each dimension of the data array. 
The data can +also be represented as an area (instead of a point) by including a bounds array +for each coordinate array. :ref:`data_structured_grid` visualises an +example. + +.. _data_structured_grid: +.. figure:: images/data_structured_grid.svg + :alt: Diagram of how data is represented on a structured grid + :align: left + :width: 1280 + + Data on a structured grid. + + 1D coordinate arrays (pink circles) are combined to construct a structured + grid of points (pink crosses). 2D bounds arrays (blue circles) can also be + used to describe the 1D boundaries (blue lines) at either side of each + rank of points; each point therefore having four bounds (x+y, upper+lower), + together describing a quadrilateral area around that point. Data from the + 2D data array (orange circles) can be assigned to these point locations + (orange diamonds) or area locations (orange quads) by matching the relative + positions in the data array to the relative spatial positions - see the + black outlined shapes as examples of this in action. + +Unstructured Meshes (the new world) +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +A mesh is made up of different types of **element**: + +.. list-table:: + :widths: 15, 15, 70 + + * - 0D + - ``node`` + - The 'core' of the mesh. A point position in space, constructed from + 2 or 3 coordinates (2D or 3D space). + * - 1D + - ``edge`` + - Constructed by connecting 2 nodes. + * - 2D + - ``face`` + - Constructed by connecting 3 or more nodes. + * - 3D + - ``volume`` + - Constructed by connecting 4 or more nodes (which must each have 3 + coordinates - 3D space). + +Every node in the mesh is defined by indexing the 1-dimensional X and Y (and +optionally Z) coordinate arrays (the ``node_coordinates``) - e.g. +``(x[3], y[3])`` gives the position of the fourth node. Note that this means +each node has its own coordinates, independent of every other node. + +Any higher dimensional element - an edge/face/volume - is described by a +sequence of the indices of the nodes that make up that element. E.g. a +triangular face made from connecting the first, third and fourth nodes: +``[0, 2, 3]``. These 1D sequences combine into a 2D array enumerating **all** +the elements of that type - edge/face/volume - called a **connectivity**. +E.g. we could make a mesh of 4 nodes, with 2 triangles described using this +``face_node_connectivity``: ``[[0, 2, 3], [3, 2, 1]]`` (note the shared nodes). + +.. note:: More on Connectivities: + + * The element type described by a connectivity is known as its + **location**; ``edge`` in ``edge_node_connectivity``. + * According to the UGRID conventions, the nodes in a face should be + listed in "anti-clockwise order from above". + * Connectivities also exist to connect the higher dimensional elements, + e.g. ``face_edge_connectivity``. These are optional conveniences to + speed up certain operations and will not be discussed here. + +.. important:: + + **Meshes are unstructured**. The mesh elements - represented in the + coordinate and connectivity arrays detailed above - are enumerated + along a single **unstructured dimension**. An element's position along + this dimension has nothing to do with its spatial position. + +A data variable associated with a mesh has a **location** of either ``node``, +``edge``, ``face`` or ``volume``. The data is stored in a 1D array with one +datum per element, matched to its element by matching the datum index with the +coordinate or connectivity index along the **unstructured dimension**. 
So for
+an example data array called ``foo``:
+``foo[3]`` would be at position ``(x[3], y[3])`` if it were node-located, or at
+``faces[3]`` if it were face-located. :ref:`data_ugrid_mesh` visualises an
+example of what is described above.
+
+.. _data_ugrid_mesh:
+.. figure:: images/data_ugrid_mesh.svg
+   :alt: Diagram of how data is represented on an unstructured mesh
+   :align: left
+   :width: 1280
+
+   Data on an unstructured mesh
+
+   1D coordinate arrays (pink circles) describe node positions in space (pink
+   crosses). A 2D connectivity array (blue circles) describes faces by
+   connecting four nodes - by referencing their indices - into a face outline
+   (blue outlines on the map). Data from the 1D data array (orange circles)
+   can be assigned to these node locations (orange diamonds) or face locations
+   (orange quads) by matching the indices in the data array to the indices in
+   the coordinate arrays (for nodes) or connectivity array (for faces). See
+   the black outlined shapes as examples of index matching in action, and the
+   black stippled shapes to demonstrate that relative array position confers
+   no relative spatial information.
+
+----
+
+The mesh model also supports edges/faces/volumes having associated 'centre'
+coordinates - to allow point data to be assigned to these elements. 'Centre' is
+just a convenience term - the points can exist anywhere within their respective
+elements. See :ref:`ugrid_element_centres` for a visualised example.
+
+.. _ugrid_element_centres:
+.. figure:: images/ugrid_element_centres.svg
+   :alt: Diagram demonstrating mesh face-centred data.
+   :align: left
+   :width: 1280
+
+   Data can be assigned to mesh edge/face/volume 'centres'
+
+   1D *node* coordinate arrays (pink circles) describe node positions in
+   space (pink crosses). A 2D connectivity array (blue circles) describes
+   faces by connecting four nodes into a face outline (blue outlines on the
+   map). Further 1D *face* coordinate arrays (pink circles) describe a
+   'centre' point position (pink stars) for each face enumerated in the
+   connectivity array.
+
+Mesh Flexibility
+++++++++++++++++
+Above we have seen how one could replicate data on a structured grid using
+a mesh instead. But the utility of a mesh is the extra flexibility it offers.
+Here are the main examples:
+
+Every node is completely independent - every one can have unique X and Y
+(and Z) coordinate values. See :ref:`ugrid_node_independence`.
+
+.. _ugrid_node_independence:
+.. figure:: images/ugrid_node_independence.svg
+   :alt: Diagram demonstrating the independence of each mesh node
+   :align: left
+   :width: 300
+
+   Every mesh node is completely independent
+
+   The same array shape and structure used to describe the node positions
+   (pink crosses) in a regular grid (left-hand maps) is equally able to
+   describe **any** position for these nodes (e.g. the right-hand maps),
+   simply by changing the array values. The quadrilateral faces (blue
+   outlines) can therefore be given any quadrilateral shape by re-positioning
+   their constituent nodes.
+
+Faces and volumes can have variable node counts, i.e. different numbers of
+sides. This is achieved by masking the unused 'slots' in the connectivity
+array, as sketched below. See :ref:`ugrid_variable_faces`.
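+
+As a quick illustration of such masking in NumPy (a sketch only - the array
+values here are hypothetical, and real connectivities are handled for you by
+Iris), a quadrilateral and a triangle can share one connectivity array:
+
+.. doctest::
+
+    >>> import numpy as np
+    >>> # one quadrilateral and one triangle; -1 marks the unused 'slot'
+    >>> face_node = np.ma.masked_equal([[0, 1, 4, 3], [1, 2, 4, -1]], -1)
+    >>> print(face_node)
+    [[0 1 4 3]
+     [1 2 4 --]]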
+.. figure:: images/ugrid_variable_faces.svg
+    :alt: Diagram demonstrating mesh faces with variable node counts
+    :align: left
+    :width: 300
+
+    Mesh faces can have different node counts (using masking)
+
+    The 2D connectivity array (blue circles) describes faces by connecting
+    nodes (pink crosses) to make up a face (blue outlines). The faces can use
+    different numbers of nodes by shaping the connectivity array to
+    accommodate the face with the most nodes, then masking unused node 'slots'
+    (black circles) for faces with fewer nodes than the maximum.
+
+Data can be assigned to lines (edges) just as easily as points (nodes) or
+areas (faces). See :ref:`ugrid_edge_data`.
+
+.. _ugrid_edge_data:
+.. figure:: images/ugrid_edge_data.svg
+    :alt: Diagram demonstrating data assigned to mesh edges
+    :align: left
+    :width: 300
+
+    Data can be assigned to mesh edges
+
+    The 2D connectivity array (blue circles) describes edges by connecting 2
+    nodes (pink crosses) to make up an edge (blue lines). Data can be assigned
+    to the edges (orange lines) by matching the indices of the 1D data array
+    (not shown) to the indices in the connectivity array.
+
+.. _ugrid implications:
+
+What does this mean?
+--------------------
+Meshes can represent much more varied spatial arrangements
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+The highly specific way of recording position (geometry) and shape
+(topology) allows meshes to represent essentially **any** spatial arrangement
+of data. There are therefore many new applications that aren't possible using a
+structured grid, including:
+
+* `The UK Met Office's LFRic cubed-sphere `_
+* `Oceanic model outputs `_
+
+.. todo:
+    a third example!
+
+Mesh 'payload' is much larger than with structured grids
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Coordinates are recorded per-node, and connectivities are recorded per-element.
+This contrasts with a structured grid, where a single coordinate value is
+shared by every data point/area along that line.
+
+For example: representing the surface of a cubed-sphere using a mesh leads to
+coordinates and connectivities being **~8 times larger than the data itself**,
+as opposed to a small fraction of the data size when dividing a spherical
+surface using a structured grid of longitudes and latitudes.
+
+This further increases the emphasis on lazy loading and processing of data
+using packages such as Dask.
+
+.. note::
+
+    The large, 1D data arrays associated with meshes are a very different
+    shape to what Iris users and developers are used to. It is suspected
+    that optimal performance will need new chunking strategies, but at time
+    of writing (``Jan 2022``) experience is still limited.
+
+.. todo:
+    Revisit when we have more information.
+
+Spatial operations on mesh data are more complex
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Detail: :doc:`operations`
+
+Indexing a mesh data array cannot be used for:
+
+#. Region selection
+#. Neighbour identification
+
+This is because - unlike with a structured data array - relative position in
+a mesh's 1-dimensional data arrays has no relation to relative position in
+space. We must instead perform specialised operations using the information in
+the mesh's connectivities, or by translating the mesh into a format designed
+for mesh analysis such as VTK.
+
+Such calculations can still be optimised to avoid them slowing workflows, but
+the important take-away here is that **adaptation is needed when working with
+mesh data**.
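+To illustrate the indexing problem with a minimal sketch - plain NumPy, with
+invented face-centre coordinates standing in for a real mesh - note how
+region selection needs a spatial mask rather than a slice:
+
+.. code-block:: python
+
+    import numpy as np
+
+    # Structured grid: array position encodes spatial position, so a slice
+    # selects a spatially contiguous region.
+    grid_data = np.arange(16.0).reshape(4, 4)
+    region = grid_data[1:3, 1:3]
+
+    # Mesh: one datum per face along the unstructured dimension. A slice
+    # just takes the first three faces enumerated, wherever they lie.
+    mesh_data = np.array([10.0, 11.0, 12.0, 13.0, 14.0, 15.0])
+    not_a_region = mesh_data[:3]
+
+    # Region selection instead needs spatial information - here, the
+    # (invented) face-centre coordinates - to mask the faces actually inside.
+    face_x = np.array([2.0, 6.0, 11.0, 3.0, 9.0, 1.0])
+    face_y = np.array([1.0, 1.0, 4.0, 8.0, 2.0, 5.0])
+    inside = (face_x < 7.0) & (face_y < 3.0)
+    region_data = mesh_data[inside]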
+ + +How Iris Represents This +======================== + +.. + Include API links to the various classes + + Include Cube/Mesh printout(s) + +.. seealso:: + + Remember this is a prose summary. Precise documentation is at: + :mod:`iris.mesh`. + +.. note:: + + At time of writing (``Jan 2022``), neither 3D meshes nor 3D elements + (volumes) are supported. + +The Basics +---------- +The Iris :class:`~iris.cube.Cube` has several new members: + +* | :attr:`~iris.cube.Cube.mesh` + | The :class:`iris.mesh.MeshXY` that describes the + :class:`~iris.cube.Cube`\'s horizontal geography. +* | :attr:`~iris.cube.Cube.location` + | ``node``/``edge``/``face`` - the mesh element type with which this + :class:`~iris.cube.Cube`\'s :attr:`~iris.cube.Cube.data` is associated. +* | :meth:`~iris.cube.Cube.mesh_dim` + | The :class:`~iris.cube.Cube`\'s **unstructured dimension** - the one that + indexes over the horizontal :attr:`~iris.cube.Cube.data` positions. + +These members will all be ``None`` for a :class:`~iris.cube.Cube` with no +associated :class:`~iris.mesh.MeshXY`. + +This :class:`~iris.cube.Cube`\'s unstructured dimension has multiple attached +:class:`iris.mesh.MeshCoord`\s (one for each axis e.g. +``x``/``y``), which can be used to infer the points and bounds of any index on +the :class:`~iris.cube.Cube`\'s unstructured dimension. + +.. testsetup:: ugrid_summaries + + import numpy as np + + from iris.coords import AuxCoord, DimCoord + from iris.cube import Cube + from iris.mesh import Connectivity, MeshXY + + node_x = AuxCoord( + points=[0.0, 5.0, 0.0, 5.0, 8.0], + standard_name="longitude", + units="degrees_east", + ) + node_y = AuxCoord( + points=[3.0, 3.0, 0.0, 0.0, 0.0], + standard_name="latitude", + units="degrees_north", + ) + + edge_node_c = Connectivity( + indices=[[0, 1], [0, 2], [1, 3], [1, 4], [2, 3], [3, 4]], + cf_role="edge_node_connectivity", + ) + + face_indices = np.ma.masked_equal([[0, 1, 3, 2], [1, 4, 3, 999]], 999) + face_node_c = Connectivity( + indices=face_indices, cf_role="face_node_connectivity" + ) + + def centre_coords(conn): + indexing = np.ma.filled(conn.indices, 0) + x, y = [ + AuxCoord( + node_coord.points[indexing].mean(axis=conn.connected_axis), + node_coord.standard_name, + units=node_coord.units, + ) + for node_coord in (node_x, node_y) + ] + return [(x, "x"), (y, "y")] + + my_mesh = MeshXY( + long_name="my_mesh", + topology_dimension=2, + node_coords_and_axes=[(node_x, "x"), (node_y, "y")], + connectivities=[edge_node_c, face_node_c], + edge_coords_and_axes=centre_coords(edge_node_c), + face_coords_and_axes=centre_coords(face_node_c), + ) + + vertical_levels = DimCoord([0, 1, 2], "height") + + def location_cube(conn): + location = conn.location + mesh_coord_x, mesh_coord_y = my_mesh.to_MeshCoords(location) + data_shape = (conn.shape[conn.location_axis], len(vertical_levels.points)) + data_array = np.arange(np.prod(data_shape)).reshape(data_shape) + + return Cube( + data=data_array, + long_name=f"{location}_data", + units="K", + dim_coords_and_dims=[(vertical_levels, 1)], + aux_coords_and_dims=[(mesh_coord_x, 0), (mesh_coord_y, 0)], + ) + + edge_cube = location_cube(edge_node_c) + face_cube = location_cube(face_node_c) + +.. 
doctest:: ugrid_summaries

+    >>> print(edge_cube)
+    edge_data / (K)                     (-- : 6; height: 3)
+        Dimension coordinates:
+            height                          -          x
+        Mesh coordinates:
+            latitude                        x          -
+            longitude                       x          -
+        Mesh:
+            name                        my_mesh
+            location                    edge
+
+    >>> print(edge_cube.location)
+    edge
+
+    >>> print(edge_cube.mesh_dim())
+    0
+
+    >>> print(edge_cube.mesh.summary(shorten=True))
+
+
+The Detail
+----------
+How UGRID information is stored
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+* | :class:`iris.mesh.MeshXY`
+  | Contains all information about the mesh.
+  | Includes:
+
+  * | :attr:`~iris.mesh.MeshXY.topology_dimension`
+    | The maximum dimensionality of shape (1D=edge, 2D=face) supported
+      by this :class:`~iris.mesh.MeshXY`. Determines which
+      :class:`~iris.mesh.Connectivity`\s are required/optional
+      (see below).
+
+  * 1-3 collections of :class:`iris.coords.AuxCoord`\s:
+
+    * | **Required**: :attr:`~iris.mesh.MeshXY.node_coords`
+      | The nodes that are the basis for the mesh.
+    * | Optional: :attr:`~iris.mesh.MeshXY.edge_coords`,
+        :attr:`~iris.mesh.MeshXY.face_coords`
+      | For indicating the 'centres' of the edges/faces.
+      | **NOTE:** generating a :class:`~iris.mesh.MeshCoord` from
+        a :class:`~iris.mesh.MeshXY` currently (``Jan 2022``)
+        requires centre coordinates for the given ``location``; to be
+        rectified in future.
+
+  * 1 or more :class:`iris.mesh.Connectivity`\s:
+
+    * | **Required for 1D (edge) elements**:
+        :attr:`~iris.mesh.MeshXY.edge_node_connectivity`
+      | Defines the edges by connecting nodes.
+    * | **Required for 2D (face) elements**:
+        :attr:`~iris.mesh.MeshXY.face_node_connectivity`
+      | Defines the faces by connecting nodes.
+    * Optional: any other connectivity type. See
+      :attr:`iris.mesh.Connectivity.UGRID_CF_ROLES` for the
+      full list of types.
+
+.. doctest:: ugrid_summaries
+
+    >>> print(edge_cube.mesh)
+    MeshXY : 'my_mesh'
+        topology_dimension: 2
+        node
+            node_dimension: 'Mesh2d_node'
+            node coordinates
+
+
+        edge
+            edge_dimension: 'Mesh2d_edge'
+            edge_node_connectivity:
+            edge coordinates
+
+
+        face
+            face_dimension: 'Mesh2d_face'
+            face_node_connectivity:
+            face coordinates
+
+
+        long_name: 'my_mesh'
+
+* | :class:`iris.mesh.MeshCoord`
+  | Described in detail in `MeshCoords`_.
+  | Stores the following information:
+
+  * | :attr:`~iris.mesh.MeshCoord.mesh`
+    | The :class:`~iris.mesh.MeshXY` associated with this
+      :class:`~iris.mesh.MeshCoord`. This determines the
+      :attr:`~iris.cube.Cube.mesh` attribute of any :class:`~iris.cube.Cube`
+      this :class:`~iris.mesh.MeshCoord` is attached to (see
+      `The Basics`_)
+
+  * | :attr:`~iris.mesh.MeshCoord.location`
+    | ``node``/``edge``/``face`` - the element detailed by this
+      :class:`~iris.mesh.MeshCoord`. This determines the
+      :attr:`~iris.cube.Cube.location` attribute of any
+      :class:`~iris.cube.Cube` this
+      :class:`~iris.mesh.MeshCoord` is attached to (see
+      `The Basics`_).
+
+.. _ugrid MeshCoords:
+
+MeshCoords
+~~~~~~~~~~
+Links a :class:`~iris.cube.Cube` to a :class:`~iris.mesh.MeshXY` by
+attaching to the :class:`~iris.cube.Cube`\'s unstructured dimension, in the
+same way that all :class:`~iris.coords.Coord`\s attach to
+:class:`~iris.cube.Cube` dimensions. This allows a single
+:class:`~iris.cube.Cube` to have a combination of unstructured and structured
+dimensions (e.g. horizontal mesh plus vertical levels and a time series),
+using the same logic for every dimension.
+
+:class:`~iris.mesh.MeshCoord`\s are instantiated using a given
+:class:`~iris.mesh.MeshXY`, ``location``
+("node"/"edge"/"face") and ``axis``.
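+For illustration, a minimal sketch using the ``my_mesh`` object from the test
+setup above (direct keyword construction is assumed here;
+:meth:`~iris.mesh.MeshXY.to_MeshCoords`, described below, is usually the
+more convenient route):
+
+.. code-block:: python
+
+    from iris.mesh import MeshCoord
+
+    # One MeshCoord per axis; here, the 'y' coordinate of the edge elements.
+    mesh_coord_y = MeshCoord(mesh=my_mesh, location="edge", axis="y")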
+The process interprets the
+:class:`~iris.mesh.MeshXY`\'s
+:attr:`~iris.mesh.MeshXY.node_coords` and, if appropriate, the
+:attr:`~iris.mesh.MeshXY.edge_node_connectivity`/
+:attr:`~iris.mesh.MeshXY.face_node_connectivity` and
+:attr:`~iris.mesh.MeshXY.edge_coords`/
+:attr:`~iris.mesh.MeshXY.face_coords`
+to produce a :class:`~iris.coords.Coord`
+:attr:`~iris.coords.Coord.points` and :attr:`~iris.coords.Coord.bounds`
+representation of all the :class:`~iris.mesh.MeshXY`\'s
+nodes/edges/faces for the given axis.
+
+The method :meth:`iris.mesh.MeshXY.to_MeshCoords` is available to
+create a :class:`~iris.mesh.MeshCoord` for
+every axis represented by that :class:`~iris.mesh.MeshXY`,
+given only the ``location`` argument.
+
+.. doctest:: ugrid_summaries
+
+    >>> for coord in edge_cube.coords(mesh_coords=True):
+    ...     print(coord)
+    MeshCoord : latitude / (degrees_north)
+        mesh:
+        location: 'edge'
+        points: [3. , 1.5, 1.5, 1.5, 0. , 0. ]
+        bounds: [
+            [3., 3.],
+            [3., 0.],
+            [3., 0.],
+            [3., 0.],
+            [0., 0.],
+            [0., 0.]]
+        shape: (6,)  bounds(6, 2)
+        dtype: float64
+        standard_name: 'latitude'
+        axis: 'y'
+    MeshCoord : longitude / (degrees_east)
+        mesh:
+        location: 'edge'
+        points: [2.5, 0. , 5. , 6.5, 2.5, 6.5]
+        bounds: [
+            [0., 5.],
+            [0., 0.],
+            [5., 5.],
+            [5., 8.],
+            [0., 5.],
+            [5., 8.]]
+        shape: (6,)  bounds(6, 2)
+        dtype: float64
+        standard_name: 'longitude'
+        axis: 'x'
+
+
+__ CF-UGRID_
\ No newline at end of file
diff --git a/docs/src/further_topics/ugrid/images/data_structured_grid.svg b/docs/src/further_topics/ugrid/images/data_structured_grid.svg
new file mode 100644
index 0000000000..28f088bd71
--- /dev/null
+++ b/docs/src/further_topics/ugrid/images/data_structured_grid.svg
@@ -0,0 +1,1374 @@
+ [SVG markup not reproduced (garbled in extraction): "Data on a structured grid" diagram - coordinate/bounds arrays combined into point and area locations for a data array ``my_variable``.]
diff --git a/docs/src/further_topics/ugrid/images/data_ugrid_mesh.svg b/docs/src/further_topics/ugrid/images/data_ugrid_mesh.svg
new file mode 100644
index 0000000000..c2b822fbcf
--- /dev/null
+++ b/docs/src/further_topics/ugrid/images/data_ugrid_mesh.svg
@@ -0,0 +1,2273 @@
+ [SVG markup not reproduced (garbled in extraction): "Data on an unstructured mesh" diagram - node_coordinates and face_node_connectivity arrays matched by index to a data array ``my_variable``.]
diff --git a/docs/src/further_topics/ugrid/images/fesom_mesh.png b/docs/src/further_topics/ugrid/images/fesom_mesh.png
new file mode 100644
index 0000000000..283899a94b
Binary files /dev/null and b/docs/src/further_topics/ugrid/images/fesom_mesh.png differ
diff --git a/docs/src/further_topics/ugrid/images/geovistalogo.svg b/docs/src/further_topics/ugrid/images/geovistalogo.svg
new file mode 100644
index 0000000000..4c68f0ee3f
--- /dev/null
+++ b/docs/src/further_topics/ugrid/images/geovistalogo.svg
@@ -0,0 +1,573 @@
+ [SVG markup not reproduced (garbled in extraction): GeoVista logo - "Cartographic rendering and mesh analytics powered by PyVista."]
diff --git a/docs/src/further_topics/ugrid/images/iris-esmf-regrid.svg b/docs/src/further_topics/ugrid/images/iris-esmf-regrid.svg
new file mode 100644
index 0000000000..93e35cb21d
--- /dev/null
+++ b/docs/src/further_topics/ugrid/images/iris-esmf-regrid.svg
@@ -0,0 +1,93 @@
+ [SVG markup not reproduced (garbled in extraction): iris-esmf-regrid logo.]
diff --git a/docs/src/further_topics/ugrid/images/orca_grid.png b/docs/src/further_topics/ugrid/images/orca_grid.png
new file mode 100644
index 0000000000..f07ba0dcd1
Binary files /dev/null and b/docs/src/further_topics/ugrid/images/orca_grid.png differ
diff --git a/docs/src/further_topics/ugrid/images/plotting.png b/docs/src/further_topics/ugrid/images/plotting.png
new file mode 100644
index 0000000000..6e7d570ba2
Binary files /dev/null and b/docs/src/further_topics/ugrid/images/plotting.png differ
diff --git a/docs/src/further_topics/ugrid/images/smc_mesh.png b/docs/src/further_topics/ugrid/images/smc_mesh.png
new file mode 100644
index 0000000000..8c5a9d86eb
Binary files /dev/null and b/docs/src/further_topics/ugrid/images/smc_mesh.png differ
diff --git a/docs/src/further_topics/ugrid/images/ugrid_edge_data.svg b/docs/src/further_topics/ugrid/images/ugrid_edge_data.svg
new file mode 100644
index 0000000000..1520d22264
--- /dev/null
+++ b/docs/src/further_topics/ugrid/images/ugrid_edge_data.svg
@@ -0,0 +1,630 @@
+ [SVG markup not reproduced (garbled in extraction): diagram of data assigned to mesh edges via a 12 x 2 edge_node_connectivity.]
diff --git a/docs/src/further_topics/ugrid/images/ugrid_element_centres.svg b/docs/src/further_topics/ugrid/images/ugrid_element_centres.svg
new file mode 100644
index 0000000000..94ab6ec585
--- /dev/null
+++ b/docs/src/further_topics/ugrid/images/ugrid_element_centres.svg
@@ -0,0 +1,1276 @@
+ [SVG markup not reproduced (garbled in extraction): diagram of mesh face-centred data - node_coordinates, face_coordinates and a 12 x 4 face_node_connectivity.]
diff --git a/docs/src/further_topics/ugrid/images/ugrid_node_independence.svg b/docs/src/further_topics/ugrid/images/ugrid_node_independence.svg
new file mode 100644
index 0000000000..d63000da92
--- /dev/null
+++ b/docs/src/further_topics/ugrid/images/ugrid_node_independence.svg
@@ -0,0 +1,865 @@
+ [SVG markup not reproduced (garbled in extraction): diagram demonstrating the independence of each mesh node.]
diff --git a/docs/src/further_topics/ugrid/images/ugrid_variable_faces.svg b/docs/src/further_topics/ugrid/images/ugrid_variable_faces.svg
new file mode 100644
index 0000000000..91223e269a
--- /dev/null
+++ b/docs/src/further_topics/ugrid/images/ugrid_variable_faces.svg
@@ -0,0 +1,943 @@
+ [SVG markup not reproduced (garbled in extraction): diagram of mesh faces with variable node counts via a 12 x 6 face_node_connectivity.]
diff --git a/docs/src/further_topics/ugrid/index.rst b/docs/src/further_topics/ugrid/index.rst
new file mode 100644
index 0000000000..c247a9dc6d
--- /dev/null
+++ b/docs/src/further_topics/ugrid/index.rst
@@ -0,0 +1,56 @@
+.. include:: ../../common_links.inc
+
+.. _ugrid:
+
+Mesh Support
+************
+
+Iris includes specialised handling of mesh-located data (as opposed to
+grid-located data). Iris and its :ref:`partner packages <ugrid partners>` are
+designed to make working with mesh-located data as simple as possible, with new
+capabilities being added all the time. More detail is in this section and in
+the :mod:`iris.mesh` API documentation.
+
+This mesh support is based on the `CF-UGRID Conventions`__; UGRID-conformant
+meshes + data can be loaded from a file into Iris' data model, and meshes +
+data represented in Iris' data model can be saved as a UGRID-conformant file.
+
+----
+
+Meshes are different
+    Mesh-located data is fundamentally different to grid-located data.
+    Many of Iris' existing operations need adapting before they can work with
+    mesh-located data, and in some cases entirely new concepts are needed.
+    **Read the detail in these pages before jumping into your own code.**
+Iris' mesh support is experimental
+    This is a rapidly evolving part of the codebase at time of writing
+    (``Jan 2022``), as we continually expand the operations that work with
+    mesh data. **Be prepared for breaking changes even in minor releases.**
+:ref:`Get involved! `
+    We know meshes are an exciting new area for much of Earth science, so we
+    hope there are a lot of you with new files/ideas/wishlists, and we'd love
+    to hear more 🙂.
+
+----
+
+Read on to find out more...
+
+* :doc:`data_model` - learn why the mesh experience is so different.
+* :doc:`partner_packages` - meet some optional dependencies that provide powerful mesh operations.
+* :doc:`operations` - experience how your workflows will look when written for mesh data.
+* :doc:`other_meshes` - check out some examples of converting various mesh formats into Iris' mesh format.
+
+..
+ Need an actual TOC to get Sphinx working properly, but have hidden it in + favour of the custom bullets above. + +.. toctree:: + :hidden: + :maxdepth: 1 + + data_model + partner_packages + operations + other_meshes + +__ CF-UGRID_ diff --git a/docs/src/further_topics/ugrid/operations.rst b/docs/src/further_topics/ugrid/operations.rst new file mode 100644 index 0000000000..97dfaaa5b1 --- /dev/null +++ b/docs/src/further_topics/ugrid/operations.rst @@ -0,0 +1,883 @@ +.. _ugrid operations: + +Working with Mesh Data +********************** + +.. note:: Several of the operations below rely on the optional dependencies + mentioned in :doc:`partner_packages`. + +Operations Summary +------------------ +.. list-table:: + :align: left + :widths: 35, 75 + + * - `Making a Mesh`_ + - |tagline: making a mesh| + * - `Making a Cube`_ + - |tagline: making a cube| + * - `Save`_ + - |tagline: save| + * - `Load`_ + - |tagline: load| + * - `Plotting`_ + - |tagline: plotting| + * - `Region Extraction`_ + - |tagline: region extraction| + * - `Regridding`_ + - |tagline: regridding| + * - `Equality`_ + - |tagline: equality| + * - `Combining Cubes`_ + - |tagline: combining cubes| + * - `Arithmetic`_ + - |tagline: arithmetic| + +.. + Below: use demo code over prose wherever workable. Headings aren't an + exhaustive list (can you think of any other popular operations?). + +Making a Mesh +------------- +.. |tagline: making a mesh| replace:: |new| + +.. rubric:: |tagline: making a mesh| + +**Already have a file?** Consider skipping to `Load`_. + +Creating Iris objects from scratch is a highly useful skill for testing code +and improving understanding of how Iris works. This knowledge will likely prove +particularly useful when converting data into the Iris mesh data model from +structured formats and non-UGRID mesh formats. + +The objects created in this example will be used where possible in the +subsequent example operations on this page. + +.. dropdown:: Code + :icon: code + + .. doctest:: ugrid_operations + + >>> import numpy as np + + >>> from iris.coords import AuxCoord + >>> from iris.mesh import Connectivity, MeshXY + + # Going to create the following mesh + # (node indices are shown to aid understanding): + # + # 0----1 + # | |\ + # | + |+\ + # 2----3--4 + + >>> node_x = AuxCoord( + ... points=[0.0, 5.0, 0.0, 5.0, 8.0], + ... standard_name="longitude", + ... units="degrees_east", + ... long_name="node_x_coordinates", + ... ) + >>> node_y = AuxCoord(points=[3.0, 3.0, 0.0, 0.0, 0.0], standard_name="latitude") + + >>> face_x = AuxCoord([2.0, 6.0], "longitude") + >>> face_y = AuxCoord([1.0, 1.0], "latitude") + + >>> edge_node_c = Connectivity( + ... indices=[[0, 1], [0, 2], [1, 3], [1, 4], [2, 3], [3, 4]], + ... cf_role="edge_node_connectivity", + ... attributes={"demo": "Supports every standard CF property"}, + ... ) + + # Create some dead-centre edge coordinates. + >>> edge_x, edge_y = [ + ... AuxCoord( + ... node_coord.points[edge_node_c.indices_by_location()].mean(axis=1), + ... node_coord.standard_name, + ... ) + ... for node_coord in (node_x, node_y) + ... ] + + >>> face_indices = np.ma.masked_equal([[0, 1, 3, 2], [1, 4, 3, 999]], 999) + >>> face_node_c = Connectivity( + ... indices=face_indices, cf_role="face_node_connectivity" + ... ) + + >>> my_mesh = MeshXY( + ... long_name="my_mesh", + ... topology_dimension=2, # Supports 2D (face) elements. + ... node_coords_and_axes=[(node_x, "x"), (node_y, "y")], + ... connectivities=[edge_node_c, face_node_c], + ... 
edge_coords_and_axes=[(edge_x, "x"), (edge_y, "y")], + ... face_coords_and_axes=[(face_x, "x"), (face_y, "y")], + ... ) + + >>> print(my_mesh) + MeshXY : 'my_mesh' + topology_dimension: 2 + node + node_dimension: 'Mesh2d_node' + node coordinates + + + edge + edge_dimension: 'Mesh2d_edge' + edge_node_connectivity: + edge coordinates + + + face + face_dimension: 'Mesh2d_face' + face_node_connectivity: + face coordinates + + + long_name: 'my_mesh' + + +.. _making a cube: + +Making a Cube (with a Mesh) +--------------------------- +.. |tagline: making a cube| replace:: |unchanged| + +.. rubric:: |tagline: making a cube| + +Creating a :class:`~iris.cube.Cube` is unchanged; the +:class:`~iris.mesh.MeshXY` is linked via a +:class:`~iris.mesh.MeshCoord` (see :ref:`ugrid MeshCoords`): + +.. dropdown:: Code + :icon: code + + .. doctest:: ugrid_operations + + >>> import numpy as np + + >>> from iris.coords import DimCoord + >>> from iris.cube import Cube, CubeList + + >>> vertical_levels = DimCoord([0, 1, 2], "height") + + >>> my_cubelist = CubeList() + >>> for conn in (edge_node_c, face_node_c): + ... location = conn.location + ... mesh_coord_x, mesh_coord_y = my_mesh.to_MeshCoords(location) + ... data_shape = (len(conn.indices_by_location()), len(vertical_levels.points)) + ... data_array = np.arange(np.prod(data_shape)).reshape(data_shape) + ... + ... my_cubelist.append( + ... Cube( + ... data=data_array, + ... long_name=f"{location}_data", + ... units="K", + ... dim_coords_and_dims=[(vertical_levels, 1)], + ... aux_coords_and_dims=[(mesh_coord_x, 0), (mesh_coord_y, 0)], + ... ) + ... ) + + >>> print(my_cubelist) + 0: edge_data / (K) (-- : 6; height: 3) + 1: face_data / (K) (-- : 2; height: 3) + + >>> for cube in my_cubelist: + ... print(f"{cube.name()}: {cube.mesh.name()}, {cube.location}") + edge_data: my_mesh, edge + face_data: my_mesh, face + + >>> print(my_cubelist.extract_cube("edge_data")) + edge_data / (K) (-- : 6; height: 3) + Dimension coordinates: + height - x + Mesh coordinates: + latitude x - + longitude x - + Mesh: + name my_mesh + location edge + + +Save +---- +.. |tagline: save| replace:: |unchanged| + +.. rubric:: |tagline: save| + +.. note:: UGRID saving support is limited to the NetCDF file format. + +The Iris saving process automatically detects if the :class:`~iris.cube.Cube` +has an associated :class:`~iris.mesh.MeshXY` and automatically +saves the file in a UGRID-conformant format: + +.. dropdown:: Code + :icon: code + + .. 
doctest:: ugrid_operations + + >>> from subprocess import run + + >>> from iris import save + + >>> cubelist_path = "my_cubelist.nc" + >>> save(my_cubelist, cubelist_path) + + >>> ncdump_result = run(["ncdump", "-h", cubelist_path], capture_output=True) + >>> print(ncdump_result.stdout.decode().replace("\t", " ")) + netcdf my_cubelist { + dimensions: + Mesh2d_node = 5 ; + Mesh2d_edge = 6 ; + Mesh2d_face = 2 ; + height = 3 ; + my_mesh_face_N_nodes = 4 ; + my_mesh_edge_N_nodes = 2 ; + variables: + int my_mesh ; + my_mesh:cf_role = "mesh_topology" ; + my_mesh:topology_dimension = 2 ; + my_mesh:long_name = "my_mesh" ; + my_mesh:node_coordinates = "longitude latitude" ; + my_mesh:edge_coordinates = "longitude_0 latitude_0" ; + my_mesh:face_coordinates = "longitude_1 latitude_1" ; + my_mesh:face_node_connectivity = "mesh2d_face" ; + my_mesh:edge_node_connectivity = "mesh2d_edge" ; + double longitude(Mesh2d_node) ; + longitude:units = "degrees_east" ; + longitude:standard_name = "longitude" ; + longitude:long_name = "node_x_coordinates" ; + double latitude(Mesh2d_node) ; + latitude:standard_name = "latitude" ; + double longitude_0(Mesh2d_edge) ; + longitude_0:standard_name = "longitude" ; + double latitude_0(Mesh2d_edge) ; + latitude_0:standard_name = "latitude" ; + double longitude_1(Mesh2d_face) ; + longitude_1:standard_name = "longitude" ; + double latitude_1(Mesh2d_face) ; + latitude_1:standard_name = "latitude" ; + int64 mesh2d_face(Mesh2d_face, my_mesh_face_N_nodes) ; + mesh2d_face:_FillValue = -1LL ; + mesh2d_face:cf_role = "face_node_connectivity" ; + mesh2d_face:start_index = 0LL ; + int64 mesh2d_edge(Mesh2d_edge, my_mesh_edge_N_nodes) ; + mesh2d_edge:demo = "Supports every standard CF property" ; + mesh2d_edge:cf_role = "edge_node_connectivity" ; + mesh2d_edge:start_index = 0LL ; + int64 edge_data(Mesh2d_edge, height) ; + edge_data:long_name = "edge_data" ; + edge_data:units = "K" ; + edge_data:mesh = "my_mesh" ; + edge_data:location = "edge" ; + edge_data:coordinates = "latitude_0 longitude_0" ; + int64 height(height) ; + height:standard_name = "height" ; + int64 face_data(Mesh2d_face, height) ; + face_data:long_name = "face_data" ; + face_data:units = "K" ; + face_data:mesh = "my_mesh" ; + face_data:location = "face" ; + face_data:coordinates = "latitude_1 longitude_1" ; + + // global attributes: + :Conventions = "CF-1.7" ; + } + + +The :func:`iris.mesh.save_mesh` function allows +:class:`~iris.mesh.MeshXY`\es to be saved to file without +associated :class:`~iris.cube.Cube`\s: + +.. dropdown:: Code + :icon: code + + .. 
doctest:: ugrid_operations + + >>> from subprocess import run + + >>> from iris.mesh import save_mesh + + >>> mesh_path = "my_mesh.nc" + >>> save_mesh(my_mesh, mesh_path) + + >>> ncdump_result = run(["ncdump", "-h", mesh_path], capture_output=True) + >>> print(ncdump_result.stdout.decode().replace("\t", " ")) + netcdf my_mesh { + dimensions: + Mesh2d_node = 5 ; + Mesh2d_edge = 6 ; + Mesh2d_face = 2 ; + my_mesh_face_N_nodes = 4 ; + my_mesh_edge_N_nodes = 2 ; + variables: + int my_mesh ; + my_mesh:cf_role = "mesh_topology" ; + my_mesh:topology_dimension = 2 ; + my_mesh:long_name = "my_mesh" ; + my_mesh:node_coordinates = "longitude latitude" ; + my_mesh:edge_coordinates = "longitude_0 latitude_0" ; + my_mesh:face_coordinates = "longitude_1 latitude_1" ; + my_mesh:face_node_connectivity = "mesh2d_face" ; + my_mesh:edge_node_connectivity = "mesh2d_edge" ; + double longitude(Mesh2d_node) ; + longitude:units = "degrees_east" ; + longitude:standard_name = "longitude" ; + longitude:long_name = "node_x_coordinates" ; + double latitude(Mesh2d_node) ; + latitude:standard_name = "latitude" ; + double longitude_0(Mesh2d_edge) ; + longitude_0:standard_name = "longitude" ; + double latitude_0(Mesh2d_edge) ; + latitude_0:standard_name = "latitude" ; + double longitude_1(Mesh2d_face) ; + longitude_1:standard_name = "longitude" ; + double latitude_1(Mesh2d_face) ; + latitude_1:standard_name = "latitude" ; + int64 mesh2d_face(Mesh2d_face, my_mesh_face_N_nodes) ; + mesh2d_face:_FillValue = -1LL ; + mesh2d_face:cf_role = "face_node_connectivity" ; + mesh2d_face:start_index = 0LL ; + int64 mesh2d_edge(Mesh2d_edge, my_mesh_edge_N_nodes) ; + mesh2d_edge:demo = "Supports every standard CF property" ; + mesh2d_edge:cf_role = "edge_node_connectivity" ; + mesh2d_edge:start_index = 0LL ; + + // global attributes: + :Conventions = "CF-1.7" ; + } + + +Load +---- +.. |tagline: load| replace:: |unchanged| + +.. rubric:: |tagline: load| + +.. note:: UGRID loading support is limited to the NetCDF file format. + +Iris mesh support detects + parses any UGRID information when loading files, to +produce cubes with a non-empty ".mesh" property. + +.. dropdown:: Code + :icon: code + + .. doctest:: ugrid_operations + + >>> from iris import load + + >>> loaded_cubelist = load(cubelist_path) + + # Sort CubeList to ensure consistent result. + >>> loaded_cubelist.sort(key=lambda cube: cube.name()) + >>> print(loaded_cubelist) + 0: edge_data / (K) (-- : 6; height: 3) + 1: face_data / (K) (-- : 2; height: 3) + +All the existing loading functionality still operates on UGRID-compliant +data - :class:`~iris.Constraint`\s, callbacks, :func:`~iris.load_cube` +etcetera: + +.. dropdown:: Code + :icon: code + + .. doctest:: ugrid_operations + + >>> from iris import Constraint, load_cube + + >>> ground_cubelist = load(cubelist_path, Constraint(height=0)) + >>> face_cube = load_cube(cubelist_path, "face_data") + + # Sort CubeList to ensure consistent result. + >>> ground_cubelist.sort(key=lambda cube: cube.name()) + >>> print(ground_cubelist) + 0: edge_data / (K) (-- : 6) + 1: face_data / (K) (-- : 2) + + >>> print(face_cube) + face_data / (K) (-- : 2; height: 3) + Dimension coordinates: + height - x + Mesh coordinates: + latitude x - + longitude x - + Mesh: + name my_mesh + location face + Attributes: + Conventions 'CF-1.7' + +.. note:: + + We recommend caution if constraining on coordinates associated with a + :class:`~iris.mesh.MeshXY`. 
An individual coordinate value
+    might not be shared by any other data points, and using a coordinate range
+    will be notably more computationally demanding, given the size of the
+    dimension versus structured grids
+    (:ref:`see the data model detail <ugrid implications>`).
+
+The :func:`iris.mesh.load_mesh` and
+:func:`~iris.mesh.load_meshes` functions allow only
+:class:`~iris.mesh.MeshXY`\es to be loaded from a file without
+creating any associated :class:`~iris.cube.Cube`\s:
+
+.. dropdown:: Code
+    :icon: code
+
+    .. doctest:: ugrid_operations
+
+        >>> from iris.mesh import load_mesh
+
+        >>> loaded_mesh = load_mesh(cubelist_path)
+
+        >>> print(loaded_mesh)
+        MeshXY : 'my_mesh'
+            topology_dimension: 2
+            node
+                node_dimension: 'Mesh2d_node'
+                node coordinates
+
+
+            edge
+                edge_dimension: 'Mesh2d_edge'
+                edge_node_connectivity:
+                edge coordinates
+
+
+            face
+                face_dimension: 'Mesh2d_face'
+                face_node_connectivity:
+                face coordinates
+
+
+            long_name: 'my_mesh'
+            var_name: 'my_mesh'
+
+Plotting
+--------
+.. |tagline: plotting| replace:: |different| - plot with GeoVista
+
+.. rubric:: |tagline: plotting|
+
+The Cartopy-Matplotlib combination is not optimised for displaying the high
+number of irregular shapes associated with meshes. Thankfully mesh
+visualisation is already popular in many other fields (e.g. CGI, gaming,
+SEM microscopy), so there is a wealth of tooling available, which
+:ref:`ugrid geovista` harnesses for cartographic plotting.
+
+GeoVista's default behaviour is to convert lat-lon information into full XYZ
+coordinates so the data is visualised on the surface of a 3D globe; 2D
+projections are also supported. The plots are interactive by default, so it's
+easy to explore the data in detail.
+
+Performing GeoVista operations on your :class:`~iris.cube.Cube` is made
+easy via this convenience:
+:func:`iris.experimental.geovista.cube_to_polydata`.
+
+Below is an example of using GeoVista to plot a low-res
+sample :attr:`~iris.cube.Cube.mesh` based :class:`~iris.cube.Cube`. For
+some truly spectacular visualisations of high-res data please see the
+GeoVista :external+geovista:doc:`generated/gallery/index`.
+
+.. dropdown:: Code
+    :icon: code
+
+    .. code-block:: python
+
+        >>> from geovista import GeoPlotter, Transform
+        >>> from geovista.common import to_cartesian
+        >>> import matplotlib.pyplot as plt
+
+        >>> from iris import load_cube, sample_data_path
+        >>> from iris.experimental.geovista import cube_to_polydata
+
+        >>> sample_mesh_cube = load_cube(sample_data_path("mesh_C4_synthetic_float.nc"))
+        >>> print(sample_mesh_cube)
+        synthetic / (1)                     (-- : 96)
+            Mesh coordinates:
+                latitude                        x
+                longitude                       x
+            Mesh:
+                name                        Topology data of 2D unstructured mesh
+                location                    face
+            Attributes:
+                NCO                         'netCDF Operators version 4.7.5 (Homepage = http://nco.sf.net, Code = h ...'
+                history                     'Mon Apr 12 01:44:41 2021: ncap2 -s synthetic=float(synthetic) mesh_C4_synthetic.nc ...'
+                nco_openmp_thread_number    1
+
+        # Convert our mesh+data to a PolyData object.
+        >>> face_polydata = cube_to_polydata(sample_mesh_cube)
+        >>> print(face_polydata)
+        PolyData (...
+          N Cells:    96
+          N Points:   98
+          N Strips:   0
+          X Bounds:   -1.000e+00, 1.000e+00
+          Y Bounds:   -1.000e+00, 1.000e+00
+          Z Bounds:   -1.000e+00, 1.000e+00
+          N Arrays:   4
+
+        # Create the GeoVista plotter and add our mesh+data to it.
+        >>> my_plotter = GeoPlotter()
+        >>> my_plotter.add_coastlines()
+        >>> my_plotter.add_mesh(face_polydata)
+        >>> my_plotter.show()
+
+    .. image:: images/plotting.png
+        :alt: A GeoVista plot of low-res sample data.
+
+Region Extraction
+-----------------
+..
|tagline: region extraction| replace:: |different| - use GeoVista for mesh analysis + +.. rubric:: |tagline: region extraction| + +As described in :doc:`data_model`, indexing for a range along a +:class:`~iris.cube.Cube`\'s :meth:`~iris.cube.Cube.mesh_dim` will not provide +a contiguous region, since **position on the unstructured dimension is +unrelated to spatial position**. This means that subsetted +:class:`~iris.mesh.MeshCoord`\s cannot be reliably interpreted +as intended, and subsetting a :class:`~iris.mesh.MeshCoord` is +therefore set to return an :class:`~iris.coords.AuxCoord` instead - breaking +the link between :class:`~iris.cube.Cube` and +:class:`~iris.mesh.MeshXY`: + +.. dropdown:: Code + :icon: code + + .. doctest:: ugrid_operations + + >>> edge_cube = my_cubelist.extract_cube("edge_data") + >>> print(edge_cube) + edge_data / (K) (-- : 6; height: 3) + Dimension coordinates: + height - x + Mesh coordinates: + latitude x - + longitude x - + Mesh: + name my_mesh + location edge + + # Sub-setted MeshCoords have become AuxCoords. + >>> print(edge_cube[:-1]) + edge_data / (K) (-- : 5; height: 3) + Dimension coordinates: + height - x + Auxiliary coordinates: + latitude x - + longitude x - + +Extracting a region therefore requires extra steps - to determine the spatial +position of the data points before they can be analysed as inside/outside the +selected region. The recommended way to do this is using tools provided by +:ref:`ugrid geovista`, which is optimised for performant mesh analysis. + +Performing GeoVista operations on your :class:`~iris.cube.Cube` is made +easy via this convenience: +:func:`iris.experimental.geovista.cube_to_polydata`. + +An Iris convenience for regional extraction is also provided: +:func:`iris.experimental.geovista.extract_unstructured_region`; demonstrated +below: + + +.. dropdown:: Code + :icon: code + + .. doctest:: ugrid_operations + + >>> from geovista.geodesic import BBox + >>> from iris import load_cube, sample_data_path + >>> from iris.experimental.geovista import cube_to_polydata, extract_unstructured_region + + >>> sample_mesh_cube = load_cube(sample_data_path("mesh_C4_synthetic_float.nc")) + >>> print(sample_mesh_cube) + synthetic / (1) (-- : 96) + Mesh coordinates: + latitude x + longitude x + Mesh: + name Topology data of 2D unstructured mesh + location face + Attributes: + NCO 'netCDF Operators version 4.7.5 (Homepage = http://nco.sf.net, Code = h ...' + history 'Mon Apr 12 01:44:41 2021: ncap2 -s synthetic=float(synthetic) mesh_C4_synthetic.nc ...' + nco_openmp_thread_number 1 + + >>> regional_cube = extract_unstructured_region( + ... cube=sample_mesh_cube, + ... polydata=cube_to_polydata(sample_mesh_cube), + ... region=BBox(lons=[0, 70, 70, 0], lats=[-25, -25, 45, 45]), + ... preference="center", + ... ) + >>> print(regional_cube) + synthetic / (1) (-- : 11) + Mesh coordinates: + latitude x + longitude x + Mesh: + name unknown + location face + Attributes: + NCO 'netCDF Operators version 4.7.5 (Homepage = http://nco.sf.net, Code = h ...' + history 'Mon Apr 12 01:44:41 2021: ncap2 -s synthetic=float(synthetic) mesh_C4_synthetic.nc ...' + nco_openmp_thread_number 1 + + +Regridding +---------- +.. |tagline: regridding| replace:: |different| - use iris-esmf-regrid for mesh regridders + +.. rubric:: |tagline: regridding| + +Regridding to or from a mesh requires different logic than Iris' existing +regridders, which are designed for structured grids. 
For this we recommend
+ESMF's powerful regridding tools, which integrate with Iris' mesh data model
+via the :ref:`ugrid iris-esmf-regrid` package.
+
+.. todo: inter-sphinx links when available.
+
+Regridding is achieved via the
+:class:`esmf_regrid.experimental.unstructured_scheme.MeshToGridESMFRegridder`
+and
+:class:`~esmf_regrid.experimental.unstructured_scheme.GridToMeshESMFRegridder`
+classes. Regridding from a source :class:`~iris.cube.Cube` to a target
+:class:`~iris.cube.Cube` involves initialising and then calling one of these
+classes. Initialising is done by passing in the source and target
+:class:`~iris.cube.Cube` as arguments. The regridder is then called by passing
+the source :class:`~iris.cube.Cube` as an argument. We can demonstrate this
+with the
+:class:`~esmf_regrid.experimental.unstructured_scheme.MeshToGridESMFRegridder`:
+
+..
+    Not using doctest here as want to keep iris-esmf-regrid as optional dependency.
+
+.. dropdown:: Code
+    :icon: code
+
+    .. code-block:: python
+
+        >>> from esmf_regrid.experimental.unstructured_scheme import MeshToGridESMFRegridder
+        >>> from iris import load, load_cube
+
+        # You could also download these files from github.com/SciTools/iris-test-data.
+        >>> from iris.tests import get_data_path
+        >>> mesh_file = get_data_path(
+        ...     ["NetCDF", "unstructured_grid", "lfric_surface_mean.nc"]
+        ... )
+        >>> grid_file = get_data_path(
+        ...     ["NetCDF", "regrid", "regrid_template_global_latlon.nc"]
+        ... )
+
+        # Load a list of cubes defined on the same Mesh.
+        >>> mesh_cubes = load(mesh_file)
+
+        # Extract a specific cube.
+        >>> mesh_cube1 = mesh_cubes.extract_cube("sea_surface_temperature")
+        >>> print(mesh_cube1)
+        sea_surface_temperature / (K)       (-- : 1; -- : 13824)
+            Mesh coordinates:
+                latitude                        -        x
+                longitude                       -        x
+            Auxiliary coordinates:
+                time                            x        -
+            Cell methods:
+                0                           time: mean (interval: 300 s)
+                1                           time_counter: mean
+            Attributes:
+                Conventions                 UGRID
+                description                 Created by xios
+                interval_operation          300 s
+                interval_write              1 d
+                name                        lfric_surface
+                online_operation            average
+                timeStamp                   2020-Feb-07 16:23:14 GMT
+                title                       Created by xios
+                uuid                        489bcef5-3d1c-4529-be42-4ab5f8c8497b
+
+        # Load the target grid.
+        >>> sample_grid = load_cube(grid_file)
+        >>> print(sample_grid)
+        sample_grid / (unknown)             (latitude: 180; longitude: 360)
+            Dimension coordinates:
+                latitude                        x               -
+                longitude                       -               x
+            Attributes:
+                Conventions                 'CF-1.7'
+
+        # Initialise the regridder.
+        >>> rg = MeshToGridESMFRegridder(mesh_cube1, sample_grid)
+
+        # Regrid the mesh cube.
+        >>> result1 = rg(mesh_cube1)
+        >>> print(result1)
+        sea_surface_temperature / (K)       (-- : 1; latitude: 180; longitude: 360)
+            Dimension coordinates:
+                latitude                        -            x              -
+                longitude                       -            -              x
+            Auxiliary coordinates:
+                time                            x            -              -
+            Cell methods:
+                0                           time: mean (interval: 300 s)
+                1                           time_counter: mean
+            Attributes:
+                Conventions                 UGRID
+                description                 Created by xios
+                interval_operation          300 s
+                interval_write              1 d
+                name                        lfric_surface
+                online_operation            average
+                timeStamp                   2020-Feb-07 16:23:14 GMT
+                title                       Created by xios
+                uuid                        489bcef5-3d1c-4529-be42-4ab5f8c8497b
+
+.. note::
+
+    **All** :class:`~iris.cube.Cube` :attr:`~iris.cube.Cube.attributes` are
+    retained when regridding, so watch out for any attributes that reference
+    the format (there are several in these examples) - you may want to manually
+    remove them to avoid later confusion.
+
+The initialisation process is computationally expensive, so we use caching to
+improve performance.
Once a regridder has been initialised, it can be used on +any :class:`~iris.cube.Cube` which has been defined on the same +:class:`~iris.mesh.MeshXY` (or on the same **grid** in the case of +:class:`~esmf_regrid.experimental.unstructured_scheme.GridToMeshESMFRegridder`). +Since calling a regridder is usually a lot faster than initialising, reusing +regridders can save a lot of time. We can demonstrate the reuse of the +previously initialised regridder: + +.. dropdown:: Code + :icon: code + + .. code-block:: python + + # Extract a different cube defined on the same Mesh. + >>> mesh_cube2 = mesh_cubes.extract_cube("precipitation_flux") + >>> print(mesh_cube2) + precipitation_flux / (kg m-2 s-1) (-- : 1; -- : 13824) + Mesh coordinates: + latitude - x + longitude - x + Auxiliary coordinates: + time x - + Cell methods: + 0 time: mean (interval: 300 s) + 1 time_counter: mean + Attributes: + Conventions UGRID + description Created by xios + interval_operation 300 s + interval_write 1 d + name lfric_surface + online_operation average + timeStamp 2020-Feb-07 16:23:14 GMT + title Created by xios + uuid 489bcef5-3d1c-4529-be42-4ab5f8c8497b + + # Regrid the new mesh cube using the same regridder. + >>> result2 = rg(mesh_cube2) + >>> print(result2) + precipitation_flux / (kg m-2 s-1) (-- : 1; latitude: 180; longitude: 360) + Dimension coordinates: + latitude - x - + longitude - - x + Auxiliary coordinates: + time x - - + Cell methods: + 0 time: mean (interval: 300 s) + 1 time_counter: mean + Attributes: + Conventions UGRID + description Created by xios + interval_operation 300 s + interval_write 1 d + name lfric_surface + online_operation average + timeStamp 2020-Feb-07 16:23:14 GMT + title Created by xios + uuid 489bcef5-3d1c-4529-be42-4ab5f8c8497b + +Support also exists for saving and loading previously initialised regridders - +:func:`esmf_regrid.experimental.io.save_regridder` and +:func:`~esmf_regrid.experimental.io.load_regridder` - so that they can be +re-used by future scripts. + +Equality +-------- +.. |tagline: equality| replace:: |unchanged| + +.. rubric:: |tagline: equality| + +:class:`~iris.mesh.MeshXY` comparison is supported, and comparing +two ':class:`~iris.mesh.MeshXY`-:class:`~iris.cube.Cube`\s' will +include a comparison of the respective +:class:`~iris.mesh.MeshXY`\es, with no extra action needed by the +user. + +.. note:: + + Keep an eye on memory demand when comparing large + :class:`~iris.mesh.MeshXY`\es, but note that + :class:`~iris.mesh.MeshXY`\ equality is enabled for lazy + processing (:doc:`/userguide/real_and_lazy_data`), so if the + :class:`~iris.mesh.MeshXY`\es being compared are lazy the + process will use less memory than their total size. + +Combining Cubes +--------------- +.. |tagline: combining cubes| replace:: |pending| + +.. rubric:: |tagline: combining cubes| + +Merging or concatenating :class:`~iris.cube.Cube`\s (described in +:doc:`/userguide/merge_and_concat`) with two different +:class:`~iris.mesh.MeshXY`\es is not possible - a +:class:`~iris.cube.Cube` must be associated with just a single +:class:`~iris.mesh.MeshXY`, and merge/concatenate are not yet +capable of combining multiple :class:`~iris.mesh.MeshXY`\es into +one. + +:class:`~iris.cube.Cube`\s that include +:class:`~iris.mesh.MeshCoord`\s can still be merged/concatenated +on dimensions other than the :meth:`~iris.cube.Cube.mesh_dim`, but only if their +:class:`~iris.cube.Cube.mesh`\es are *equal* (in practice, identical, even to +matching ``var_name``\s). + +.. 
seealso::
+
+    You may wish to investigate
+    :func:`iris.mesh.recombine_submeshes`, which can be used
+    for a very specific type of :class:`~iris.mesh.MeshXY`
+    combination not detailed here.
+
+Arithmetic
+----------
+.. |tagline: arithmetic| replace:: |unchanged|
+
+.. rubric:: |tagline: arithmetic|
+
+Cube Arithmetic (described in :doc:`/userguide/cube_maths`)
+has been extended to handle :class:`~iris.cube.Cube`\s that include
+:class:`~iris.mesh.MeshCoord`\s, and hence have a ``cube.mesh``.
+
+Cubes with meshes can be combined in arithmetic operations like
+"ordinary" cubes. They can combine with other cubes without that mesh
+(and its dimension); or with a matching mesh, which may be on a different
+dimension.
+Arithmetic can also be performed between a cube with a mesh and a mesh
+coordinate with a matching mesh.
+
+In all cases, the result will have the same mesh as the input cubes.
+
+Meshes only match if they are fully equal -- i.e. they contain all the same
+coordinates and connectivities, with identical names, units, attributes and
+data content.
+
+
+.. todo:
+    Enumerate other popular operations that aren't yet possible
+    (and are they planned soon?)
+
+.. |new| replace:: ✨ New
+.. |unchanged| replace:: ♻️ Unchanged
+.. |different| replace:: ⚠️ Different
+.. |pending| replace:: 🚧 Support Pending
diff --git a/docs/src/further_topics/ugrid/other_meshes.rst b/docs/src/further_topics/ugrid/other_meshes.rst
new file mode 100644
index 0000000000..19f220be82
--- /dev/null
+++ b/docs/src/further_topics/ugrid/other_meshes.rst
@@ -0,0 +1,364 @@
+.. _other_meshes:
+
+Converting Other Mesh Formats
+*****************************
+
+Iris' Mesh Data Model is based primarily on the CF-UGRID conventions (see
+:doc:`data_model`), but other mesh formats can be converted to fit into this
+model, **enabling use of Iris' specialised mesh support**. Below are some
+examples demonstrating how this works for various mesh formats.
+
+.. contents::
+    :local:
+
+`FESOM 1.4`_ Voronoi Polygons
+-----------------------------
+.. figure:: images/fesom_mesh.png
+    :width: 300
+    :alt: Sample of FESOM mesh voronoi polygons, with variable numbers of sides.
+
+A FESOM mesh encoded in a NetCDF file includes:
+
+* X+Y point coordinates
+* X+Y corner coordinates of the Voronoi Polygons around these points -
+  represented as the bounds of the coordinates
+
+To represent the Voronoi Polygons as faces, the corner coordinates will be
+used as the **nodes** when creating the Iris
+:class:`~iris.ugrid.mesh.MeshXY`.
+
+.. dropdown:: Code
+    :icon: code
+
+    .. code-block:: python
+
+        >>> import iris
+        >>> from iris.ugrid import MeshXY
+
+
+        >>> temperature_cube = iris.load_cube("my_file.nc", "sea_surface_temperature")
+        >>> print(temperature_cube)
+        sea_surface_temperature / (degC)    (time: 12; -- : 126859)
+            Dimension coordinates:
+                time                            x        -
+            Auxiliary coordinates:
+                latitude                        -        x
+                longitude                       -        x
+            Cell methods:
+                0                           area: mean where sea
+                1                           time: mean
+            Attributes:
+                grid                        'FESOM 1.4 (unstructured grid in the horizontal with 126859 wet nodes;...
+                ...
+
+        >>> print(temperature_cube.coord("longitude"))
+        AuxCoord :  longitude / (degrees)
+            points:
+            bounds:
+            shape: (126859,)  bounds(126859, 18)
+            dtype: float64
+            standard_name: 'longitude'
+            var_name: 'lon'
+
+        # Use a Mesh to represent the Cube's horizontal geography, by replacing
+        # the existing face AuxCoords with new MeshCoords.
+        >>> fesom_mesh = MeshXY.from_coords(temperature_cube.coord('longitude'),
+        ...
temperature_cube.coord('latitude')) + >>> for new_coord in fesom_mesh.to_MeshCoords("face"): + ... old_coord = temperature_cube.coord(new_coord.name()) + ... unstructured_dim, = old_coord.cube_dims(temperature_cube) + ... temperature_cube.remove_coord(old_coord) + ... temperature_cube.add_aux_coord(new_coord, unstructured_dim) + + >>> print(temperature_cube) + sea_surface_temperature / (degC) (time: 12; -- : 126859) + Dimension coordinates: + time x - + Mesh coordinates: + latitude - x + longitude - x + Cell methods: + 0 area: mean where sea + 1 time: mean + Attributes: + grid 'FESOM 1.4 (unstructured grid in the horizontal with 126859 wet nodes;... + ... + + >>> print(temperature_cube.mesh) + MeshXY : 'unknown' + topology_dimension: 2 + node + node_dimension: 'Mesh2d_node' + node coordinates + shape(2283462,)> + shape(2283462,)> + face + face_dimension: 'Mesh2d_face' + face_node_connectivity: shape(126859, 18)> + face coordinates + shape(126859,)> + shape(126859,)> + +`WAVEWATCH III`_ Spherical Multi-Cell (SMC) WAVE Quad Grid +---------------------------------------------------------- +.. figure:: images/smc_mesh.png + :width: 300 + :alt: Sample of an SMC mesh, with decreasing quad sizes at the coastlines. + +An SMC grid encoded in a NetCDF file includes: + +* X+Y face centre coordinates +* X+Y base face sizes +* X+Y face size factors + +From this information we can derive face corner coordinates, which will be used +as the **nodes** when creating the Iris +:class:`~iris.ugrid.mesh.MeshXY`. + + +.. dropdown:: Code + :icon: code + + .. code-block:: python + + >>> import iris + >>> from iris.ugrid import MeshXY + >>> import numpy as np + + + >>> wave_cube = iris.load_cube("my_file.nc", "sea_surface_wave_significant_height") + >>> print(wave_cube) + sea_surface_wave_significant_height / (m) (time: 7; -- : 666328) + Dimension coordinates: + time x - + Auxiliary coordinates: + forecast_period x - + latitude - x + latitude cell size factor - x + longitude - x + longitude cell size factor - x + Scalar coordinates: + forecast_reference_time 2021-12-05 00:00:00 + Attributes: + SIN4 namelist parameter BETAMAX 1.39 + SMC_grid_type 'seapoint' + WAVEWATCH_III_switches 'NOGRB SHRD PR2 UNO SMC FLX0 LN1 ST4 NL1 BT1 DB1 TR0 BS0 IC0 IS0 REF0 WNT1... + WAVEWATCH_III_version_number '7.13' + altitude_resolution 'n/a' + area 'Global wave model GS512L4EUK' + base_lat_size 0.029296871 + base_lon_size 0.043945305 + ... + + >>> faces_x = wave_cube.coord("longitude") + >>> faces_y = wave_cube.coord("latitude") + >>> face_size_factor_x = wave_cube.coord("longitude cell size factor") + >>> face_size_factor_y = wave_cube.coord("latitude cell size factor") + >>> base_x_size = wave_cube.attributes["base_lon_size"] + >>> base_y_size = wave_cube.attributes["base_lat_size"] + + # Calculate face corners from face centres and face size factors. + >>> face_centres_x = faces_x.points + >>> face_centres_y = faces_y.points + >>> face_size_x = face_size_factor_x.points * base_x_size + >>> face_size_y = face_size_factor_y.points * base_y_size + + >>> x_mins = (face_centres_x - 0.5 * face_size_x).reshape(-1, 1) + >>> x_maxs = (face_centres_x + 0.5 * face_size_x).reshape(-1, 1) + >>> y_mins = (face_centres_y - 0.5 * face_size_y).reshape(-1, 1) + >>> y_maxs = (face_centres_y + 0.5 * face_size_y).reshape(-1, 1) + + >>> face_corners_x = np.hstack([x_mins, x_maxs, x_maxs, x_mins]) + >>> face_corners_y = np.hstack([y_mins, y_mins, y_maxs, y_maxs]) + + # Add face corners as coordinate bounds. 
+
+        >>> faces_x.bounds = face_corners_x
+        >>> faces_y.bounds = face_corners_y
+
+        # Use a Mesh to represent the Cube's horizontal geography, by replacing
+        # the existing face AuxCoords with new MeshCoords.
+        >>> smc_mesh = MeshXY.from_coords(faces_x, faces_y)
+        >>> for new_coord in smc_mesh.to_MeshCoords("face"):
+        ...     old_coord = wave_cube.coord(new_coord.name())
+        ...     unstructured_dim, = old_coord.cube_dims(wave_cube)
+        ...     wave_cube.remove_coord(old_coord)
+        ...     wave_cube.add_aux_coord(new_coord, unstructured_dim)
+
+        >>> print(wave_cube)
+        sea_surface_wave_significant_height / (m) (time: 7; -- : 666328)
+            Dimension coordinates:
+                time                                   x       -
+            Mesh coordinates:
+                latitude                               -       x
+                longitude                              -       x
+            Auxiliary coordinates:
+                forecast_period                        x       -
+                latitude cell size factor              -       x
+                longitude cell size factor             -       x
+            Scalar coordinates:
+                forecast_reference_time     2021-12-05 00:00:00
+            Attributes:
+                SIN4 namelist parameter BETAMAX     1.39
+                SMC_grid_type                       'seapoint'
+                WAVEWATCH_III_switches              'NOGRB SHRD PR2 UNO SMC FLX0 LN1 ST4 NL1 BT1 DB1 TR0 BS0 IC0 IS0 REF0 WNT1...
+                WAVEWATCH_III_version_number        '7.13'
+                altitude_resolution                 'n/a'
+                area                                'Global wave model GS512L4EUK'
+                base_lat_size                       0.029296871
+                base_lon_size                       0.043945305
+                ...
+
+        >>> print(wave_cube.mesh)
+        MeshXY : 'unknown'
+            topology_dimension: 2
+            node
+                node_dimension: 'Mesh2d_node'
+                node coordinates
+
+
+            face
+                face_dimension: 'Mesh2d_face'
+                face_node_connectivity:
+                face coordinates
+
+
+
+
+.. _ORCA_example:
+
+`NEMO`_ data on ORCA tripolar grid
+----------------------------------
+.. figure:: images/orca_grid.png
+    :width: 300
+    :alt: Plot of ORCA-gridded data from NEMO.
+    :class: dark-light
+
+NEMO can use various grids, but is frequently used with ORCA type grids.
+ORCA grids store global data in 2-dimensional ny * nx arrays. All cells are
+four-sided. The grids are based on tri-polar layouts, but X and Y spacings are
+irregular and not given by any defined functional forms.
+
+* arrays (ny, nx) of face-located data variables
+* arrays (ny, nx) of X+Y face centre coordinates
+* arrays (ny, nx, 4) of X+Y face corner coordinates
+  (all faces are quadrilaterals)
+
+For simplicity, we treat each face corner as an independent node, and use a
+face-node connectivity which simply lists the nodes in sequence,
+i.e. [[0, 1, 2, 3], [4, 5, 6, 7], ...].
+
+.. Note::
+    This is the simplest solution, but produces approx 4x more nodes than
+    necessary, since the coordinate bounds contain many duplicate locations.
+    Removing the duplicates is quite easy, but often not necessary.
+
+To make an unstructured cube, the data must be 'flattened' to convert the
+given X and Y dimensions into a single mesh dimension. Since Iris cubes don't
+support "reshape" or "flatten" operations, we create a new cube from the
+flattened data.
+
+.. dropdown:: Code
+    :icon: code
+
+    ..
code-block:: python + + >>> import numpy as np + >>> import iris + >>> from iris.coords import AuxCoord, CellMeasure + >>> from iris.cube import Cube + >>> from iris.ugrid.mesh import MeshXY, Connectivity + + + >>> filepath = iris.sample_data_path('orca2_votemper.nc') + >>> cube = iris.load_cube(filepath) + >>> print(cube) + sea_water_potential_temperature / (degC) (-- : 148; -- : 180) + Auxiliary coordinates: + latitude x x + longitude x x + Scalar coordinates: + depth 4.999938 m, bound=(0.0, 10.0) m + time 0001-01-01 12:00:00 + Cell methods: + 0 time: mean + Attributes: + Conventions 'CF-1.5' + + + >>> co_x = cube.coord("longitude") + >>> co_y = cube.coord("latitude") + >>> ny, nx = co_x.shape + >>> n_faces = ny * nx + + >>> # Create face coords from flattened face-points + >>> face_x_co = AuxCoord(co_x.points.flatten()) + >>> face_y_co = AuxCoord(co_y.points.flatten()) + >>> assert face_x_co.shape == (n_faces,) + >>> face_x_co.metadata = co_x.metadata + >>> face_y_co.metadata = co_y.metadata + + >>> # Create node coordinates from bound points. + >>> n_nodes = n_faces * 4 + >>> node_x_co = AuxCoord(co_x.bounds.flatten()) + >>> node_y_co = AuxCoord(co_y.bounds.flatten()) + >>> assert node_x_co.shape == (n_nodes,) + >>> node_x_co.metadata = co_x.metadata + >>> node_y_co.metadata = co_y.metadata + + >>> # Create a face-node Connectivity matching the order of nodes in the bounds array + >>> face_node_inds = np.arange(n_nodes).reshape((n_faces, 4)) + >>> face_nodes_conn = Connectivity( + ... indices=face_node_inds, + ... cf_role='face_node_connectivity', + ... long_name='face_inds', units='1', + ... ) + + >>> # Create a mesh object. + >>> mesh = MeshXY( + ... topology_dimension=2, + ... node_coords_and_axes=[(node_x_co, 'x'), (node_y_co, 'y')], + ... connectivities=face_nodes_conn, + ... face_coords_and_axes=[(face_x_co, 'x'), (face_y_co, 'y')] + ... ) + >>> print(mesh) + MeshXY : 'unknown' + topology_dimension: 2 + node + node_dimension: 'Mesh2d_node' + node coordinates + + + face + face_dimension: 'Mesh2d_face' + face_node_connectivity: + face coordinates + + + + + >>> # Create an unstructured version of the input with flattened data + >>> meshcube = Cube(cube.core_data().flatten()) + >>> meshcube.metadata = cube.metadata + + >>> # Attach the mesh by adding the mesh 'face' MeshCoords into the cube + >>> mesh_dim = meshcube.ndim - 1 + >>> for co in mesh.to_MeshCoords('face'): + ... meshcube.add_aux_coord(co, mesh_dim) + ... + + >>> print(meshcube) + sea_water_potential_temperature / (degC) (-- : 26640) + Mesh coordinates: + latitude x + longitude x + Mesh: + name unknown + location face + Cell methods: + 0 time: mean + Attributes: + Conventions 'CF-1.5' + + +.. _WAVEWATCH III: https://github.com/NOAA-EMC/WW3 +.. _FESOM 1.4: https://fesom.de/models/fesom14/ +.. _NEMO: https://www.nemo-ocean.eu/ \ No newline at end of file diff --git a/docs/src/further_topics/ugrid/partner_packages.rst b/docs/src/further_topics/ugrid/partner_packages.rst new file mode 100644 index 0000000000..f69546446c --- /dev/null +++ b/docs/src/further_topics/ugrid/partner_packages.rst @@ -0,0 +1,101 @@ +.. include:: ../../common_links.inc + +.. _ugrid partners: + +Iris' Mesh Partner Packages +**************************** +Python is an easy to use language and has formed a very strong collaborative +scientific community, which is why Iris is written in Python. 
*Performant* +Python relies on calls down to low level languages like C, which is ideal for +structured grid work since +they can be directly represented as NumPy arrays. This is more difficult when +working with unstructured meshes where extra steps are needed to determine data +position (:ref:`see the data model detail `), and we need +to find ways of again passing the operations down to more optimised languages. + +The Iris team are therefore developing 'wrapper' packages, which make it quick +and easy to analyse Iris mesh data via some popular Python packages that use +powerful tools under the hood, working in C and other languages. + +These solutions have been placed in their own 'partner packages' for several +reasons: + +* Can be useful to others who are not using Iris. + + * Everyone working with multi-dimensional geographic datasets shares common + problems that need solving. + * Wider user base = stronger community = better solutions. + +* Only some Iris users will need them - they are **optional** Iris dependencies. + + * They introduce a lot of new API. + * They introduce new large dependencies that take time to install and need + disk space. + +Below you can learn more about the partner packages and how they are useful. +Specifics of what operations would require their installation can be found in: +:doc:`operations`. + +.. important:: **Experimental** + + As with Iris' mesh support, these packages are still in the + experimental stages. They would love your feedback, but as immature + packages their API, documentation, test coverage and CI are still + 'under construction'. + + +.. _`ugrid geovista`: + +`GeoVista`_ +=========== +.. image:: images/geovistalogo.svg + :width: 300 + :class: no-scaled-link + +.. rubric:: "Cartographic rendering and mesh analytics powered by `PyVista`_" + +PyVista is described as "VTK for humans" - VTK is a very powerful toolkit for +working with meshes, and PyVista brings that power into the Python ecosystem. +GeoVista in turn makes it easy to use PyVista specifically for cartographic +work, designed from the start with the Iris +:class:`~iris.mesh.MeshXY` in mind. + +Applications +------------ +* Interactively plot mesh data: + + * On a 3D globe. + * On your favourite projection. + +* Extract a specific region from a mesh. +* Combine multiple meshes into one. + +.. _`ugrid iris-esmf-regrid`: + +`iris-esmf-regrid`_ +=================== +.. image:: images/iris-esmf-regrid.svg + :width: 300 + :class: no-scaled-link + +.. rubric:: "A collection of structured and unstructured ESMF regridding schemes for Iris" + +ESMF provide a sophisticated, performant regridding utility that supports a +variety of regridding types with both structured grids and unstructured meshes, +and this also has a flexible Python interface - ESMPy. iris-esmf-regrid takes +advantage of having a specific use-case - regridding Iris +:class:`~iris.cube.Cube`\s - to provide ESMPy-Iris wrappers that make the +process as easy as possible, with highly optimised performance. + +Applications +------------ +* Regrid structured to unstructured. +* Regrid unstructured to structured. +* Regrid with dask integration, computing in parallel and maintaining data + laziness. +* | Save a prepared regridder for reuse in subsequent runs. + | Regridders can even be re-used on sources with different masks - a + significant efficiency gain. + +.. _GeoVista: https://github.com/bjlittle/geovista +.. 
_PyVista: https://docs.pyvista.org/index.html diff --git a/docs/iris/src/whitepapers/um_files_loading.rst b/docs/src/further_topics/um_files_loading.rst similarity index 90% rename from docs/iris/src/whitepapers/um_files_loading.rst rename to docs/src/further_topics/um_files_loading.rst index fd2d2a2341..c5238e6b70 100644 --- a/docs/iris/src/whitepapers/um_files_loading.rst +++ b/docs/src/further_topics/um_files_loading.rst @@ -1,5 +1,3 @@ -.. _um_files_loading: - .. testsetup:: import numpy as np @@ -13,8 +11,10 @@ np.set_printoptions(precision=8) +.. _um_files_loading: + =================================== -Iris handling of PP and Fieldsfiles +Iris Handling of PP and Fieldsfiles =================================== This document provides a basic account of how PP and Fieldsfiles data is @@ -30,17 +30,17 @@ Notes: #. Iris treats Fieldsfile data almost exactly as if it were PP -- i.e. it treats each field's lookup table entry like a PP header. -#. The Iris datamodel is based on - `NetCDF CF conventions `_, so most of this can +#. The Iris data model is based on + `NetCDF CF conventions `_, so most of this can also be seen as a metadata translation between PP and CF terms, but it is easier to discuss in terms of Iris elements. For details of Iris terms (cubes, coordinates, attributes), refer to :ref:`Iris data structures `. -For details of CF conventions, see http://cfconventions.org/. +For details of CF conventions, see https://cfconventions.org/. -Overview of loading process +Overview of Loading Process --------------------------- The basics of Iris loading are explained at :ref:`loading_iris_cubes`. @@ -125,21 +125,21 @@ with latitude and longitude axes are also supported). For an ordinary latitude-longitude grid, the cubes have coordinates called 'longitude' and 'latitude': - * These are mapped to the appropriate data dimensions. - * They have units of 'degrees'. - * They have a coordinate system of type :class:`iris.coord_systems.GeogCS`. - * The coordinate points are normally set to the regular sequence - ``ZDX/Y + BDX/Y * (1 .. LBNPT/LBROW)`` (*except*, if BDX/BDY is zero, the - values are taken from the extra data vector X/Y, if present). - * If X/Y_LOWER_BOUNDS extra data is available, this appears as bounds values - of the horizontal cooordinates. +* These are mapped to the appropriate data dimensions. +* They have units of 'degrees'. +* They have a coordinate system of type :class:`iris.coord_systems.GeogCS`. +* The coordinate points are normally set to the regular sequence + ``ZDX/Y + BDX/Y * (1 .. LBNPT/LBROW)`` (*except*, if BDX/BDY is zero, the + values are taken from the extra data vector X/Y, if present). +* If X/Y_LOWER_BOUNDS extra data is available, this appears as bounds values + of the horizontal coordinates. For **rotated** latitude-longitude coordinates (as for LBCODE=101), the horizontal coordinates differ only slightly -- - * The names are 'grid_latitude' and 'grid_longitude'. - * The coord_system is a :class:`iris.coord_systems.RotatedGeogCS`, created - with a pole defined by BPLAT, BPLON. +* The names are 'grid_latitude' and 'grid_longitude'. +* The coord_system is a :class:`iris.coord_systems.RotatedGeogCS`, created + with a pole defined by BPLAT, BPLON. For example: >>> # Load a PP field. @@ -165,7 +165,7 @@ For example: sections are written only if the actual values are unevenly spaced. -Phenomenon identification +Phenomenon Identification ------------------------- **UM Field elements** @@ -218,7 +218,7 @@ For example: LBUSER4 and LBUSER7 elements. 
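
For example, a specific diagnostic can be selected at load time by
constraining on the translated STASH attribute -- a sketch only; the file
name and STASH code here are illustrative, not taken from this document:

.. code-block:: python

    import iris

    # Match fields whose LBUSER4 (STASH) entry translates to m01s16i203.
    stash_constraint = iris.AttributeConstraint(STASH="m01s16i203")
    cubes = iris.load("my_file.pp", stash_constraint)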
-Vertical coordinates
+Vertical Coordinates
 --------------------

 **UM Field elements**

@@ -304,10 +304,9 @@ For hybrid height levels (LBVC=65):
   multidimensional or non-monotonic.

 See an example printout of a hybrid height cube,
-:ref:`here `:
-
-    Notice that this contains all of the above coordinates --
-    'model_level_number', 'sigma', 'level_height' and the derived 'altitude'.
+:ref:`here `. Notice that this contains all of the
+above coordinates -- ``model_level_number``, ``sigma``, ``level_height`` and
+the derived ``altitude``.

 .. note::

@@ -319,7 +318,7 @@

 .. _um_time_metadata:

-Time information
+Time Information
 ----------------

 **UM Field elements**

@@ -336,7 +335,7 @@ Time information

 In Iris (as in CF) times and time intervals are both expressed as simple
 numbers, following the approach of the
-`UDUNITS project `_.
+`UDUNITS project `_.

 These values are stored as cube coordinates, where the scaling and calendar
 information is contained in the :attr:`~iris.coords.Coord.units` property.

@@ -350,7 +349,7 @@
    always 1st Jan 1970 (times before this are represented as negative values).

 The units.calendar property of time coordinates is set from the lowest decimal
-digit of LBTIM, known as LBTIM.IC. Note that the non-gregorian calendars (e.g.
+digit of LBTIM, known as LBTIM.IC. Note that the non-standard calendars (e.g.
 360-day 'model' calendar) are defined in CF, not udunits.

 There are a number of different time encoding methods used in UM data, but the

@@ -364,7 +363,7 @@ Data at a single measurement timepoint (LBTIM.IB=0):
    defined according to LBTIM.IC.

 Values forecast from T2, valid at T1 (LBTIM.IB=1):
-   Coordinates ``time` and ``forecast_reference_time`` are created from the T1
+   Coordinates ``time`` and ``forecast_reference_time`` are created from the T1
    and T2 values, respectively. These have no bounds, and units of 'hours since
    1970-01-01 00:00:00', with the appropriate calendar. A ``forecast_period``
    coordinate is also created, with values T1-T2, no

@@ -383,15 +382,14 @@
 these may become dimensions of the resulting data cube. This will depend on
 the values actually present in the source fields for each of the elements.

 See an example printout of a forecast data cube,
-:ref:`here ` :
-
-    Notice that this example contains all of the above coordinates -- 'time',
-    'forecast_period' and 'forecast_reference_time'. In this case the data are
-    forecasts, so 'time' is a dimension, 'forecast_period' varies with time and
-    'forecast_reference_time' is a constant.
+:ref:`here <cube-statistics_forecast_printout>`. Notice that this example
+contains all of the above coordinates -- ``time``, ``forecast_period`` and
+``forecast_reference_time``. In this case the data are forecasts, so ``time``
+is a dimension, ``forecast_period`` varies with time and
+``forecast_reference_time`` is a constant.


-Statistical measures
+Statistical Measures
 --------------------

 **UM Field elements**

@@ -438,7 +436,7 @@ For example:
    (CellMethod(method='mean', coord_names=('time',), intervals=('6 hour',), comments=()),)


-Other metadata
+Other Metadata
 --------------

 LBRSVD4
diff --git a/docs/src/further_topics/which_regridder_to_use.rst b/docs/src/further_topics/which_regridder_to_use.rst
new file mode 100644
index 0000000000..dae273252d
--- /dev/null
+++ b/docs/src/further_topics/which_regridder_to_use.rst
@@ -0,0 +1,422 @@
+.. include:: ../common_links.inc
+
+.. _which_regridder_to_use:
+
+======================
+Which Regridder to Use
+======================
+
+This section compares all the regridding schemes which exist in `Iris`_, and
+externally in `iris-esmf-regrid`_, with a view to helping you choose the right
+regridder for your workflow. The choice of regridder
+is usually limited by the kind of data you are going from and to, but there are
+also factors of performance and numerical accuracy to consider. This section
+provides a reference for how each of the regridders differs with respect to
+these factors, beginning with a set of short tables covering their differences
+in brief and ending in a more in-depth look at how these differences might
+play out in different contexts.
+
+For an introduction to using regridders, see the :ref:`user guide`.
+
+Regridder Comparison
+====================
+
+We will highlight here some of the properties of each regridder in a table of
+the following form:
+
++-----------------+-----------------------------------------------------------+
+| **API**         | Link to API documentation.                                |
++-----------------+-----------------------------------------------------------+
+| **Method**      | The type of algorithm used to calculate the result.      |
+|                 | See section on `comparing methods`_.                      |
++-----------------+-----------------------------------------------------------+
+| **Source Grid** | The type of **coordinates** required on the ``src`` cube. |
++-----------------+-----------------------------------------------------------+
+| **Target Grid** | The type of **coordinates** required on the ``tgt`` cube. |
++-----------------+-----------------------------------------------------------+
+| **Coordinate    | The type of **coordinate system** required on the         |
+| System**        | ``src``/``tgt`` cube coordinates.                         |
++-----------------+-----------------------------------------------------------+
+| **Lazy          | If the result is calculated lazily. See                   |
+| Regridding**    | :doc:`real and lazy data `.                               |
++-----------------+-----------------------------------------------------------+
+| **Weights       | See `regridder performance`_.                             |
+| Caching**       |                                                           |
++-----------------+-----------------------------------------------------------+
+| **Notes**       | Additional details.                                       |
++-----------------+-----------------------------------------------------------+
+
+AreaWeighted
+------------
+
++-----------------+--------------------------------------------------------+
+| **API**         | :class:`~iris.analysis.AreaWeighted`                   |
++-----------------+--------------------------------------------------------+
+| **Method**      | Conservative                                           |
++-----------------+--------------------------------------------------------+
+| **Source Grid** | Pair of 1D lat/lon coordinates, must have bounds.      |
++-----------------+--------------------------------------------------------+
+| **Target Grid** | Pair of 1D lat/lon coordinates, must have bounds.      |
++-----------------+--------------------------------------------------------+
+| **Coordinate    | Must be equal on ``src`` and ``tgt``, may be ``None``. |
+| System**        |                                                        |
++-----------------+--------------------------------------------------------+
+| **Lazy          | ``True``                                               |
+| Regridding**    |                                                        |
++-----------------+--------------------------------------------------------+
+| **Weights       | ``True``                                               |
+| Caching**       |                                                        |
++-----------------+--------------------------------------------------------+
+| **Notes**       | Supports masked data with ``mdtol`` argument.          |
+|                 | See `area conservation`_.
| ++-----------------+--------------------------------------------------------+ + +Linear +------ + ++-----------------+----------------------------------------------------------------+ +| **API** | :class:`~iris.analysis.Linear` | ++-----------------+----------------------------------------------------------------+ +| **Method** | Linear | ++-----------------+----------------------------------------------------------------+ +| **Source Grid** | Pair of 1D lat/lon coordinates. | ++-----------------+----------------------------------------------------------------+ +| **Target Grid** | Pair of 1D lat/lon coordinates. | ++-----------------+----------------------------------------------------------------+ +| **Coordinate | May be present on both ``src`` and ``tgt`` or both be ``None``.| +| System** | May be different. | ++-----------------+----------------------------------------------------------------+ +| **Lazy | ``True`` | +| Regridding** | | ++-----------------+----------------------------------------------------------------+ +| **Weights | ``False`` | +| Caching** | | ++-----------------+----------------------------------------------------------------+ +| **Notes** | Supports extrapolation outside source data bounds. | ++-----------------+----------------------------------------------------------------+ + +Nearest +------- + ++-----------------+----------------------------------------------------------------+ +| **API** | :class:`~iris.analysis.Nearest` | ++-----------------+----------------------------------------------------------------+ +| **Method** | Nearest (destination to source) | ++-----------------+----------------------------------------------------------------+ +| **Source Grid** | Pair of 1D lat/lon coordinates. | ++-----------------+----------------------------------------------------------------+ +| **Target Grid** | Pair of 1D lat/lon coordinates. | ++-----------------+----------------------------------------------------------------+ +| **Coordinate | May be present on both ``src`` and ``tgt`` or both be ``None``.| +| System** | May be different. | ++-----------------+----------------------------------------------------------------+ +| **Lazy | ``True`` | +| Regridding** | | ++-----------------+----------------------------------------------------------------+ +| **Weights | ``False`` | +| Caching** | | ++-----------------+----------------------------------------------------------------+ + +UnstructuredNearest +------------------- + ++-----------------+----------------------------------------------------+ +| **API** | :class:`~iris.analysis.UnstructuredNearest` | ++-----------------+----------------------------------------------------+ +| **Method** | Nearest (destination to source) | ++-----------------+----------------------------------------------------+ +| **Source Grid** | Pair of lat/lon coordinates with any dimensionality| +| | (e.g., 1D or 2D). Must be associated to the same | +| | axes on the source cube. | ++-----------------+----------------------------------------------------+ +| **Target Grid** | Pair of 1D lat/lon coordinates. | ++-----------------+----------------------------------------------------+ +| **Coordinate | Must be equal on ``src`` and ``tgt``, may be | +| System** | ``None``. 
| ++-----------------+----------------------------------------------------+ +| **Lazy | ``False`` | +| Regridding** | | ++-----------------+----------------------------------------------------+ +| **Weights | ``False`` | +| Caching** | | ++-----------------+----------------------------------------------------+ + +PointInCell +----------- + ++-----------------+----------------------------------------------------+ +| **API** | :class:`~iris.analysis.PointInCell` | ++-----------------+----------------------------------------------------+ +| **Method** | Point in cell | ++-----------------+----------------------------------------------------+ +| **Source Grid** | Pair of lat/lon coordinates with any dimensionality| +| | (e.g., 1D or 2D). Must be associated to the same | +| | axes on the source cube. | ++-----------------+----------------------------------------------------+ +| **Target Grid** | Pair of 1D lat/lon coordinates, must have bounds. | ++-----------------+----------------------------------------------------+ +| **Coordinate | Must be equal on ``srs`` and ``tgt``, may be | +| System** | ``None``. | ++-----------------+----------------------------------------------------+ +| **Lazy | ``False`` | +| Regridding** | | ++-----------------+----------------------------------------------------+ +| **Weights | ``True`` | +| Caching** | | ++-----------------+----------------------------------------------------+ + +External Regridders +=================== + +ESMFAreaWeighted +---------------- + ++-----------------+-------------------------------------------------------------------------+ +| **API** | :class:`~iris-esmf-regrid:esmf_regrid.schemes.ESMFAreaWeighted` | ++-----------------+-------------------------------------------------------------------------+ +| **Method** | Conservative | ++-----------------+-------------------------------------------------------------------------+ +| **Source Grid** | May be either: | +| | | +| | - A pair of 1D x/y coordinates on different axes. Must have bounds. | +| | - A pair of 2D x/y coordinates on the same axes. Must have bounds. | +| | - An unstructured mesh located on cell faces. | ++-----------------+-------------------------------------------------------------------------+ +| **Target Grid** | Any of the above. May be a different type to ``src`` grid. | ++-----------------+-------------------------------------------------------------------------+ +| **Coordinate | ``src`` and ``tgt`` grid may have any coordinate system or ``None``. | +| System** | | ++-----------------+-------------------------------------------------------------------------+ +| **Lazy | ``True`` | +| Regridding** | | ++-----------------+-------------------------------------------------------------------------+ +| **Weights | ``True`` | +| Caching** | | ++-----------------+-------------------------------------------------------------------------+ +| **Notes** | Supports masked data with ``mdtol`` argument (see `area conservation`_).| +| | Differs numerically to :class:`~iris.analysis.AreaWeighted` due to | +| | representing edges as great circle arcs rather than lines of | +| | latitude/longitude. This causes less difference at higher resolutions. | +| | This can be mitigated somewhat by using the | +| | ``src_resolution`` / ``tgt_resolution`` arguments. 
| ++-----------------+-------------------------------------------------------------------------+ + +ESMFBilinear +------------ + ++-----------------+---------------------------------------------------------------------+ +| **API** | :class:`~iris-esmf-regrid:esmf_regrid.schemes.ESMFBilinear` | ++-----------------+---------------------------------------------------------------------+ +| **Method** | Linear | ++-----------------+---------------------------------------------------------------------+ +| **Source Grid** | May be either: | +| | | +| | - A pair of 1D x/y coordinates on different axes. | +| | - A pair of 2D x/y coordinates on the same axes. | +| | - An unstructured mesh located on cell faces. | ++-----------------+---------------------------------------------------------------------+ +| **Target Grid** | Any of the above. May be a different type to ``src`` grid. | ++-----------------+---------------------------------------------------------------------+ +| **Coordinate | ``src`` and ``tgt`` grid may have any coordinate system or ``None``.| +| System** | | ++-----------------+---------------------------------------------------------------------+ +| **Lazy | ``True`` | +| Regridding** | | ++-----------------+---------------------------------------------------------------------+ +| **Weights | ``True`` | +| Caching** | | ++-----------------+---------------------------------------------------------------------+ + +ESMFNearest +----------- + ++-----------------+---------------------------------------------------------------------+ +| **API** | :class:`~iris-esmf-regrid:esmf_regrid.schemes.ESMFNearest` | ++-----------------+---------------------------------------------------------------------+ +| **Method** | Nearest (destination to source) | ++-----------------+---------------------------------------------------------------------+ +| **Source Grid** | May be either: | +| | | +| | - A pair of 1D x/y coordinates on different axes. | +| | - A pair of 2D x/y coordinates on the same axes. | +| | - An unstructured mesh located on cell faces | ++-----------------+---------------------------------------------------------------------+ +| **Target Grid** | Any of the above. May be a different type to ``src`` grid. | ++-----------------+---------------------------------------------------------------------+ +| **Coordinate | ``src`` and ``tgt`` grid may have any coordinate system or ``None``.| +| System** | | ++-----------------+---------------------------------------------------------------------+ +| **Lazy | ``True`` | +| Regridding** | | ++-----------------+---------------------------------------------------------------------+ +| **Weights | ``True`` | +| Caching** | | ++-----------------+---------------------------------------------------------------------+ + +.. _comparing methods: + +Comparing Methods +================= + +The various regridding algorithms are implementations of the following +methods. While there may be slight differences in the way each regridder +implements a given method, each regridder broadly follows the principles +of that method. We give here a very brief overview of what situations +each method are best suited to followed by a more detailed discussion. + +Conservative +------------ + +Good for representing the *entirety* of the underlying data. +Designed for data represented by cell faces. A fuller description of +what it means to be *conservative* can be found in the section on +`area conservation`_. 
+
+Linear
+------
+
+Good for approximating data represented at *precise points* in space and in
+cases where it is desirable for the resulting data to be smooth. For more
+detail, see the section on `regridder smoothness`_.
+
+Nearest
+-------
+
+Tends to be the fastest regridding method. Ensures each resulting data value
+represents a data value in the source. Good in cases where averaging is
+inappropriate, e.g., for discontinuous data.
+
+Point in cell
+-------------
+
+Similarly to the conservative method, represents the entirety of the underlying
+data. Works well with data whose source is an unstructured series of points.
+
+.. _numerical accuracy:
+
+Numerical Accuracy
+==================
+
+An important thing to understand when regridding is that no regridding method
+is perfect; you will tend to lose information when you regrid. If you were to
+regrid from a source grid to a target and then back onto the original source
+grid, you would usually end up with slightly different data.
+Furthermore, statistical properties such as min, max and standard deviation are
+not guaranteed to be preserved. While regridding is inherently imperfect, there
+are some properties which can be better preserved by choosing the appropriate
+regridding method. These include:
+
+.. _area conservation:
+
+Global Area Weighted Average
+----------------------------
+Area weighted regridding schemes such as :class:`~iris.analysis.AreaWeighted` and
+:class:`~iris-esmf-regrid:esmf_regrid.schemes.ESMFAreaWeighted`
+use *conservative* regridding methods. The property which these regridders
+*conserve* is the global area weighted average of the data (or equivalently,
+the area weighted sum). More precisely, this means that::
+
+    When regridding from a source cube to a target cube defined
+    over the same area (e.g., the entire globe), assuming there
+    are no masked data points, the area weighted average
+    (weighted by the area covered by each data point) of the
+    source cube ought to be equal (within minor tolerances)
+    to the area weighted average of the result.
+
+This property will be particularly important to consider if you are intending to
+calculate global properties such as average temperature or total rainfall over a
+given area. It may be less important if you are only interested in local behaviour,
+e.g., temperature at particular locations.
+
+When there are masked points in your data, the same global conservative properties
+no longer strictly hold. This is because the area which the unmasked points in the
+source cover is no longer the same as the area covered by unmasked points in the
+target. With the keyword argument ``mdtol=0`` this means that there will be an area
+around the source mask which will be masked in the result and therefore unaccounted
+for in the area weighted average calculation. Conversely, with the keyword argument
+``mdtol=1`` there will be an unmasked area in the result that is masked in the source.
+This may be particularly important if you are intending to calculate properties
+which depend on area, e.g., calculating the total global rainfall based on data in
+units of ``kg m-2`` as an area weighted sum. With ``mdtol=0`` this will consistently
+underestimate this total and with ``mdtol=1`` it will consistently overestimate. This
+can be somewhat mitigated with a choice of ``mdtol=0.5``, but you should still be
+aware of potential inaccuracies.
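+
+As a sketch of how the two extremes are expressed in code (``masked_src`` and
+``target`` here are hypothetical cubes, not defined in this guide):
+
+.. code-block:: python
+
+    from iris.analysis import AreaWeighted
+
+    # mdtol=0: mask a target cell if any contributing source cell is masked.
+    strict = masked_src.regrid(target, AreaWeighted(mdtol=0))
+
+    # mdtol=1: keep a target cell unless all contributing source cells are
+    # masked.
+    lenient = masked_src.regrid(target, AreaWeighted(mdtol=1))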
+It should be noted that this choice of ``mdtol`` is highly
+context dependent and there will likely be occasions where a choice of ``mdtol=0`` or
+``mdtol=1`` is more suitable. The important thing is to *know your data, know what*
+*you're doing with your data and know how your regridder fits in this process*.
+
+.. todo::
+
+    add worked example
+
+.. _regridder smoothness:
+
+Data Gradient/Smoothness
+------------------------
+Alternatively, rather than conserving global properties, it may be more important to
+approximate each individual point of data as accurately as possible. In this case, it
+may be more appropriate to use a *linear* regridder such as :class:`~iris.analysis.Linear`
+or :class:`~iris-esmf-regrid:esmf_regrid.schemes.ESMFBilinear`.
+
+The linear method calculates each target point as the weighted average of the four
+surrounding source points. This average is weighted according to how close the target
+point is to the surrounding points. Notably, the value assigned to a target point varies
+*continuously* with its position (as opposed to nearest neighbour regridding).
+
+Such regridders work best when the data in question can be considered
+as a collection of measurements made at *points on a smoothly varying field*. The
+difference in behaviour between linear and conservative regridders can be seen most
+clearly when there is a large difference between the source and target grid resolution.
+
+Suppose you were regridding from a high resolution to a low resolution. If you were
+regridding using a *conservative* method, each result point would be the average of many
+source points. On the other hand, if you were using a *linear* method then the result
+would only be the average of the 4 nearest source points. This means that, while
+*conservative* methods will give you a better idea of the *totality* of the source data,
+*linear* methods will give you a better idea of the source data at a *particular point*.
+
+Conversely, suppose you were regridding from a low resolution to a high resolution. For
+other regridding methods (conservative and nearest), most of the target points covered by
+a given source point would have the same value and there would be a steep difference between
+target points near the cell boundary. For linear regridding, however, the resulting data
+will vary smoothly.
+
+.. todo::
+
+    add worked example
+
+Consistency
+-----------
+As noted above, each regridding method has its own unique effect on the data. While this can
+be manageable when contained within the context of a particular workflow, you should take care
+not to compare data which has been regridded with different regridding methods, as the artefacts
+of those regridding methods may dominate the underlying differences.
+
+.. todo::
+
+    add worked example
+
+It should also be noted that some implementations of the *same method* (e.g.,
+:class:`~iris.analysis.Nearest` and :class:`~iris.analysis.UnstructuredNearest`) may
+differ slightly and so may yield slightly different results when applied to equivalent
+data. However, this difference will be significantly less than the difference between
+regridders based on different methods.
+
+.. _regridder performance:
+
+Performance
+-----------
+Regridding can be an expensive operation, but there are ways to work with regridders to
+mitigate this cost. For most regridders, the regridding process can be broken down into
+two steps:
+
+- *Preparing* the regridder by comparing the source and target grids and generating weights.
+- *Performing* the regridding by applying those weights to the source data. + +Generally, the *prepare* step is the more expensive of the two. It is better to avoid +repeating this step unnecessarily. This can be done by *reusing* a regridder, as described +in the :ref:`user guide `. + +.. todo:: + + add benchmarks - note the iris and iris-esmf-regrid version diff --git a/docs/src/getting_started.rst b/docs/src/getting_started.rst new file mode 100644 index 0000000000..24299a4060 --- /dev/null +++ b/docs/src/getting_started.rst @@ -0,0 +1,15 @@ +.. _getting_started_index: + +Getting Started +=============== + +To get started with Iris we recommend reading :ref:`why_iris` was created and to +explore the examples in the :ref:`gallery_index` after :ref:`installing_iris` +Iris. + +.. toctree:: + :maxdepth: 1 + + why_iris + installing + generated/gallery/index \ No newline at end of file diff --git a/docs/src/index.rst b/docs/src/index.rst new file mode 100644 index 0000000000..a9bf76fc96 --- /dev/null +++ b/docs/src/index.rst @@ -0,0 +1,206 @@ +.. include:: common_links.inc +.. _iris_docs: + + +Iris +==== + +**A powerful, format-agnostic, community-driven Python package for analysing +and visualising Earth science data.** + +Iris implements a data model based on the `CF conventions `_ +giving you a powerful, format-agnostic interface for working with your data. +It excels when working with multi-dimensional Earth Science data, where tabular +representations become unwieldy and inefficient. + +For more information see :ref:`why_iris`. + +.. grid:: 3 + + .. grid-item-card:: + :text-align: center + :img-top: _static/icon_shuttle.svg + :class-img-top: w-50 m-auto px-1 py-2 dark-light + :shadow: lg + + Information on Iris, how to install and a gallery of examples that + create plots. + + +++ + .. button-ref:: getting_started_index + :ref-type: ref + :color: primary + :outline: + :expand: + + Getting Started + + .. grid-item-card:: + :text-align: center + :img-top: _static/icon_instructions.svg + :class-img-top: w-50 m-auto px-1 py-2 dark-light + :shadow: lg + + Learn how to use Iris, including loading, navigating, saving, + plotting and more. + + +++ + .. button-ref:: user_guide_index + :ref-type: ref + :color: primary + :outline: + :expand: + + User Guide + + .. grid-item-card:: + :text-align: center + :img-top: _static/icon_development.svg + :class-img-top: w-50 m-auto px-1 py-2 dark-light + :shadow: lg + + Information on how you can contribute to Iris as a developer. + + +++ + .. button-ref:: development_where_to_start + :ref-type: ref + :color: primary + :outline: + :expand: + + Developers Guide + + +.. grid:: 3 + + .. grid-item-card:: + :text-align: center + :img-top: _static/icon_api.svg + :class-img-top: w-50 m-auto px-1 py-2 dark-light + :shadow: lg + + Browse full Iris functionality by module. + + +++ + .. button-ref:: generated/api/iris + :ref-type: doc + :color: primary + :outline: + :expand: + + Iris API + + .. grid-item-card:: + :text-align: center + :img-top: _static/icon_new_product.svg + :class-img-top: w-50 m-auto px-1 py-2 dark-light + :shadow: lg + + Find out what has recently changed in Iris. + + +++ + .. button-ref:: iris_whatsnew + :ref-type: ref + :color: primary + :outline: + :expand: + + What's New + + .. grid-item-card:: + :text-align: center + :img-top: _static/icon_thumb.png + :class-img-top: w-50 m-auto px-1 py-2 dark-light + :shadow: lg + + Raise the profile of issues by voting on them. + + +++ + .. 
button-ref:: voted_issues_top
+         :ref-type: ref
+         :color: primary
+         :outline:
+         :expand:
+
+         Voted Issues
+
+
+Icons made by `FreePik `_ from
+`Flaticon `_
+
+
+.. _iris_support:
+
+Support
+~~~~~~~
+
+We, the Iris developers, have adopted `GitHub Discussions`_ to capture any
+discussions or support questions related to Iris.
+
+See also `StackOverflow for "How Do I? `_,
+which may be useful, although we do not actively monitor it.
+
+The legacy support resources:
+
+* `Users Google Group `_
+* `Developers Google Group `_
+* `Legacy Documentation`_ (Iris 2.4 or earlier). This is an archive of zip
+  files of past documentation. You can download, unzip and view the
+  documentation locally (index.html). There may be some incorrect rendering
+  and older javascript (.js) files may show a warning when uncompressing, in
+  which case we suggest you use a different unzip tool.
+
+
+.. toctree::
+   :caption: Getting Started
+   :maxdepth: 1
+   :hidden:
+
+   getting_started
+
+
+.. toctree::
+   :caption: User Guide
+   :maxdepth: 1
+   :name: userguide_index
+   :hidden:
+
+   userguide/index
+
+
+.. toctree::
+   :caption: Developers Guide
+   :maxdepth: 1
+   :name: developers_index
+   :hidden:
+
+   developers_guide/contributing_getting_involved
+
+
+.. toctree::
+   :caption: Community
+   :maxdepth: 1
+   :name: community_index
+   :hidden:
+
+   Community
+
+
+.. toctree::
+   :caption: What's New in Iris
+   :maxdepth: 1
+   :name: whats_new_index
+   :hidden:
+
+   whatsnew/index
+
+
+.. toctree::
+   :caption: Iris API
+   :maxdepth: 1
+   :hidden:
+
+   Iris API
+
+
+.. todolist::
diff --git a/docs/src/installing.rst b/docs/src/installing.rst
new file mode 100644
index 0000000000..a0a3fd2c62
--- /dev/null
+++ b/docs/src/installing.rst
@@ -0,0 +1,121 @@
+.. _installing_iris:
+
+Installing
+==========
+
+Iris can be installed using conda or pip.
+
+.. note:: Iris is currently supported and tested against |python_support|
+          running on Linux. We do not currently actively test on other
+          platforms such as Windows or macOS.
+
+          Windows 10 now has support for Linux distributions via WSL_ (Windows
+          Subsystem for Linux). This is a great option to get started with
+          Iris for users and contributors. Be aware that we do not currently
+          test against any WSL_ distributions.
+
+.. _WSL: https://learn.microsoft.com/en-us/windows/wsl/install
+
+.. note:: This documentation was built using Python |python_version|.
+
+
+.. _installing_a_released_version:
+
+Installing a Released Version
+-----------------------------
+
+.. tab-set::
+
+    .. tab-item:: conda-forge
+
+        To install Iris using conda, you must first download and install conda,
+        for example from https://docs.conda.io/en/latest/miniconda.html.
+
+        Once conda is installed, you can install Iris using conda with the following
+        command::
+
+          conda install -c conda-forge iris
+
+        If you wish to run any of the code in the gallery you will also
+        need the Iris sample data. This can also be installed using conda::
+
+          conda install -c conda-forge iris-sample-data
+
+        Further documentation on using conda and the features it provides can be found
+        at https://docs.conda.io/projects/conda/en/latest/index.html.
+
+    .. tab-item:: PyPI
+
+        Iris is also available from https://pypi.org/ so can be installed with ``pip``::
+
+          pip install scitools-iris
+
+        If you wish to run any of the code in the gallery you will also
+        need the Iris sample data. This can also be installed using pip::
+
+          pip install iris-sample-data
+
+
+
+.. _installing_from_source:
+
+Installing a Development Version
+--------------------------------
+
+The latest Iris source release is available from
+https://github.com/SciTools/iris.
+
+For instructions on how to obtain the Iris project source from GitHub, see
+:ref:`forking` and :ref:`set-up-fork`.
+
+Once conda is installed, you can create a development environment for Iris
+using conda and then activate it. The example commands below assume you are in
+the root directory of your local copy of Iris::
+
+    conda env create --file=requirements/iris.yml
+    conda activate iris-dev
+
+The ``requirements/iris.yml`` file defines the Iris development conda
+environment *name* and all the relevant *top level* `conda-forge` package
+dependencies that you need to **code**, **test**, and **build** the
+documentation. If you wish to minimise the environment footprint, simply
+remove any unwanted packages from the requirements file, e.g., if you don't
+intend to run the Iris tests locally or build the documentation, then remove
+all the packages from the `testing` and `documentation` sections.
+
+.. note:: The ``requirements/iris.yml`` file will always use the latest
+          Iris tested Python version available. For all Python versions that
+          are supported and tested against by Iris, view the contents of
+          the `requirements`_ directory.
+
+.. _requirements: https://github.com/scitools/iris/tree/main/requirements
+
+Finally, you need to run the command to configure your environment
+to find your local Iris code. From your Iris directory run::
+
+    pip install --no-deps --editable .
+
+
+Running the Tests
+-----------------
+
+To ensure your setup is configured correctly you can run the test suite using
+the command::
+
+    pytest
+
+For more information see :ref:`test manual env`.
+
+
+Custom Site Configuration
+-------------------------
+
+The default site configuration values can be overridden by creating the file
+``iris/etc/site.cfg``. For example, the following snippet can be used to
+specify a non-standard location for your dot executable::
+
+    [System]
+    dot_path = /usr/bin/dot
+
+An example configuration file is available in ``iris/etc/site.cfg.template``.
+See :py:func:`iris.config` for further configuration options.
diff --git a/docs/src/sphinxext/api_rst_formatting.py b/docs/src/sphinxext/api_rst_formatting.py
new file mode 100644
index 0000000000..6dd82de91e
--- /dev/null
+++ b/docs/src/sphinxext/api_rst_formatting.py
@@ -0,0 +1,36 @@
+# Copyright Iris contributors
+#
+# This file is part of Iris and is released under the BSD license.
+# See LICENSE in the root of the repository for full licensing details.
+
+# This script will process all .rst files that have been created by the
+# sphinxcontrib.apidoc extension and perform minor changes, specifically:
+#
+# - Remove the suffix for "package" and " module".
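+# - Note: the .rst files under generated/api are rewritten in place, with
+#   one dot printed per file processed.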
+#
+
+import ntpath
+from pathlib import Path
+
+
+def main_api_rst_formatting(app):
+    src_dir = Path("generated/api")
+
+    print(f"[{ntpath.basename(__file__)}] Processing RST files", end="")
+
+    for file in src_dir.iterdir():
+        if file.suffix == ".rst":
+            print(".", end="")
+
+            with open(file, "r") as f:
+                lines = f.read()
+
+            lines = lines.replace(" package\n=", "\n")
+            lines = lines.replace(" module\n=", "\n")
+
+            with open(file, "w") as f:
+                f.write(lines)
+    print("")
+
+
+def setup(app):
+    app.connect("builder-inited", main_api_rst_formatting)
diff --git a/docs/iris/src/userguide/change_management_goals.txt b/docs/src/userguide/change_management_goals.txt
similarity index 100%
rename from docs/iris/src/userguide/change_management_goals.txt
rename to docs/src/userguide/change_management_goals.txt
diff --git a/docs/src/userguide/citation.rst b/docs/src/userguide/citation.rst
new file mode 100644
index 0000000000..d0496f4876
--- /dev/null
+++ b/docs/src/userguide/citation.rst
@@ -0,0 +1,20 @@
+.. _Citing_Iris:
+
+===========
+Citing Iris
+===========
+
+If Iris played an important part in your research then please add us to your
+reference list by using the recommendations below.
+
+Iris can be cited directly from the `GitHub repository <https://github.com/SciTools/iris>`_;
+for more information, including where to find the citation on the repo, please
+see the `GitHub documentation`_.
+
+The Iris citation does not contain the version of the software used. We
+recommend that you use the version number of the release you used, and the
+commit hash if you checked out an unreleased version of Iris. This will allow
+others to reproduce the environment that you worked in. You can see what a
+citation should look like for a particular version of Iris in the `GitHub documentation`_.
+
+.. _GitHub documentation: https://docs.github.com/en/repositories/managing-your-repositorys-settings-and-features/customizing-your-repository/about-citation-files/
diff --git a/docs/src/userguide/code_maintenance.rst b/docs/src/userguide/code_maintenance.rst
new file mode 100644
index 0000000000..c01c1975a7
--- /dev/null
+++ b/docs/src/userguide/code_maintenance.rst
@@ -0,0 +1,60 @@
+Code Maintenance
+================
+
+From a user point of view, "code maintenance" means ensuring that your existing
+working code stays working in the face of changes to Iris.
+
+
+Stability and Change
+--------------------
+
+In practice, as Iris develops, most users will want to periodically upgrade
+their installed version to access new features or at least bug fixes.
+
+This is obvious if you are still developing other code that uses Iris, or using
+code from other sources.
+However, even if you have only legacy code that remains untouched, some code
+maintenance effort is probably still necessary:
+
+* On the one hand, *in principle*, working code will go on working, as long
+  as you don't change anything else.
+
+* However, such "version stasis" can easily become a growing burden if you
+  are simply waiting until an update becomes unavoidable; often that will
+  eventually occur when you need to update some other software component,
+  for some completely unconnected reason.
+
+
+Principles of Change Management
+-------------------------------
+
+When you upgrade software to a new version, you often find that you need to
+rewrite your legacy code, simply to keep it working.
+
+In Iris, however, we aim to reduce code maintenance problems to an absolute
+minimum by following defined change management rules.
+These ensure that, *within a major release number*:
+
+* you can be confident that your code will still work with subsequent minor
+  releases
+
+* you will be aware of future incompatibility problems in advance
+
+* you can defer making code compatibility changes for some time, until it
+  suits you
+
+The above applies to minor version upgrades: e.g. code that works with version
+"1.4.2" should still work with a subsequent minor release such as "1.5.0" or
+"1.7.2".
+
+A *major* release however, e.g. "v2.0.0" or "v3.0.0", can include more
+significant changes, including so-called "breaking" changes: this means that
+existing code may need to be modified to make it work with the new version.
+
+Since breaking changes can only occur at major releases, these are the *only*
+times we can alter or remove existing behaviours (even deprecated
+ones). This is what a major release is for: it enables the removal and
+replacement of old features.
+
+Of course, even at a major release, we do still aim to keep breaking changes to
+a minimum.
diff --git a/docs/src/userguide/concat.svg b/docs/src/userguide/concat.svg
new file mode 100644
index 0000000000..f32fc0030b
--- /dev/null
+++ b/docs/src/userguide/concat.svg
@@ -0,0 +1,823 @@
+[SVG markup not reproduced here: a schematic showing cubes concatenated along
+the t dimension, with x, y and t axis labels and tick values 0-3.]
diff --git a/docs/src/userguide/cube_maths.rst b/docs/src/userguide/cube_maths.rst
new file mode 100644
index 0000000000..79c91ca61b
--- /dev/null
+++ b/docs/src/userguide/cube_maths.rst
@@ -0,0 +1,264 @@
+.. _cube maths:
+
+==========
+Cube Maths
+==========
+
+
+The section :doc:`navigating_a_cube` highlighted that
+every cube has a data attribute;
+this attribute can then be manipulated directly::
+
+    cube.data -= 273.15
+
+The problem with manipulating the data directly is that other metadata may
+become inconsistent; in this case the units of the cube are no longer what was
+intended. This example could be rectified by changing the units attribute::
+
+    cube.units = 'celsius'
+
+.. note::
+
+    :meth:`iris.cube.Cube.convert_units` can be used to automatically convert a
+    cube's data and update its units attribute.
+    So, the two steps above can be achieved by::
+
+        cube.convert_units('celsius')
+
+In order to reduce the amount of metadata which becomes inconsistent,
+fundamental arithmetic operations such as addition, subtraction, division
+and multiplication can be applied directly to any cube.
+
+Calculating the Difference Between Two Cubes
+--------------------------------------------
+
+Let's load some air temperature data which runs from 1860 to 2100::
+
+    filename = iris.sample_data_path('E1_north_america.nc')
+    air_temp = iris.load_cube(filename, 'air_temperature')
+
+We can now get the first and last time slices using indexing
+(see :ref:`cube_indexing` for a reminder)::
+
+    t_first = air_temp[0, :, :]
+    t_last = air_temp[-1, :, :]
+
+.. testsetup::
+
+    filename = iris.sample_data_path('E1_north_america.nc')
+    air_temp = iris.load_cube(filename, 'air_temperature')
+    t_first = air_temp[0, :, :]
+    t_last = air_temp[-1, :, :]
+
+And finally we can subtract the two.
+The result is a cube of the same size as the original two time slices, +but with the data representing their difference: + + >>> print(t_last - t_first) + unknown / (K) (latitude: 37; longitude: 49) + Dimension coordinates: + latitude x - + longitude - x + Scalar coordinates: + forecast_reference_time 1859-09-01 06:00:00 + height 1.5 m + Attributes: + Conventions 'CF-1.5' + Model scenario 'E1' + source 'Data from Met Office Unified Model 6.05' + + +.. note:: + + Notice that the coordinates "time" and "forecast_period" have been removed + from the resultant cube; + this is because these coordinates differed between the two input cubes. + + +.. _cube-maths_anomaly: + +Calculating a Cube Anomaly +-------------------------- + +In section :doc:`cube_statistics` we discussed how the dimensionality of a cube +can be reduced using the :meth:`Cube.collapsed ` method +to calculate a statistic over a dimension. + +Let's use that method to calculate a mean of our air temperature time-series, +which we'll then use to calculate a time mean anomaly and highlight the powerful +benefits of cube broadcasting. + +First, let's remind ourselves of the shape of our air temperature time-series +cube:: + + >>> print(air_temp.summary(True)) + air_temperature / (K) (time: 240; latitude: 37; longitude: 49) + +Now, we'll calculate the time-series mean using the +:meth:`Cube.collapsed ` method:: + + >>> air_temp_mean = air_temp.collapsed('time', iris.analysis.MEAN) + >>> print(air_temp_mean.summary(True)) + air_temperature / (K) (latitude: 37; longitude: 49) + +As expected the *time* dimension has been collapsed, reducing the +dimensionality of the resultant *air_temp_mean* cube. This time-series mean can +now be used to calculate the time mean anomaly against the original +time-series:: + + >>> anomaly = air_temp - air_temp_mean + >>> print(anomaly.summary(True)) + unknown / (K) (time: 240; latitude: 37; longitude: 49) + +Notice that the calculation of the *anomaly* involves subtracting a +*2d* cube from a *3d* cube to yield a *3d* result. This is only possible +because cube broadcasting is performed during cube arithmetic operations. + +Cube broadcasting follows similar broadcasting rules as +`NumPy `_, but +the additional richness of Iris coordinate meta-data provides an enhanced +capability beyond the basic broadcasting behaviour of NumPy. + +As the coordinate meta-data of a cube uniquely describes each dimension, it is +possible to leverage this knowledge to identify the similar dimensions involved +in a cube arithmetic operation. This essentially means that we are no longer +restricted to performing arithmetic on cubes with identical shapes. + +This extended broadcasting behaviour is highlighted in the following +examples. The first of these shows that it is possible to involve the +transpose of the air temperature time-series in an arithmetic operation with +itself. + +Let's first create the transpose of the air temperature time-series:: + + >>> air_temp_T = air_temp.copy() + >>> air_temp_T.transpose() + >>> print(air_temp_T.summary(True)) + air_temperature / (K) (longitude: 49; latitude: 37; time: 240) + +Now add the transpose to the original time-series:: + + >>> result = air_temp + air_temp_T + >>> print(result.summary(True)) + unknown / (K) (time: 240; latitude: 37; longitude: 49) + +Notice that the *result* is the same dimensionality and shape as *air_temp*. 
+Let's check that the arithmetic operation has calculated a result that +we would intuitively expect:: + + >>> result == 2 * air_temp + True + +Let's extend this example slightly, by taking a slice from the middle +*latitude* dimension of the transpose cube:: + + >>> air_temp_T_slice = air_temp_T[:, 0, :] + >>> print(air_temp_T_slice.summary(True)) + air_temperature / (K) (longitude: 49; time: 240) + +Compared to our original time-series, the *air_temp_T_slice* cube has one +less dimension *and* its shape is different. However, this doesn't prevent +us from performing cube arithmetic with it, thanks to the extended cube +broadcasting behaviour:: + + >>> result = air_temp - air_temp_T_slice + >>> print(result.summary(True)) + unknown / (K) (time: 240; latitude: 37; longitude: 49) + + +.. seealso:: + + Relevant gallery example: + :ref:`sphx_glr_generated_gallery_general_plot_anomaly_log_colouring.py` (Anomaly) + +Combining Multiple Phenomena to Form a New One +---------------------------------------------- + +Combining cubes of potential-temperature and pressure we can calculate +the associated temperature using the equation: + +.. math:: + + T = \theta (\frac{p}{p_0}) ^ {(287.05 / 1005)} + +Where :math:`p` is pressure, :math:`\theta` is potential temperature, +:math:`p_0` is the potential temperature reference pressure +and :math:`T` is temperature. + +First, let's load pressure and potential temperature cubes:: + + filename = iris.sample_data_path('colpex.pp') + phenomenon_names = ['air_potential_temperature', 'air_pressure'] + pot_temperature, pressure = iris.load_cubes(filename, phenomenon_names) + +In order to calculate :math:`\frac{p}{p_0}` we can define a coordinate which +represents the standard reference pressure of 1000 hPa:: + + import iris.coords + p0 = iris.coords.AuxCoord(1000.0, + long_name='reference_pressure', + units='hPa') + +We must ensure that the units of ``pressure`` and ``p0`` are the same, +so convert the newly created coordinate using +the :meth:`iris.coords.Coord.convert_units` method:: + + p0.convert_units(pressure.units) + +Now we can combine all of this information to calculate the air temperature +using the equation above:: + + temperature = pot_temperature * ( (pressure / p0) ** (287.05 / 1005) ) + +Finally, the cube we have created needs to be given a suitable name:: + + temperature.rename('air_temperature') + +The result could now be plotted using the guidance provided in the +:doc:`plotting_a_cube` section. + +.. only:: html + + A very similar example to this can be found in + :ref:`sphx_glr_generated_gallery_meteorology_plot_deriving_phenomena.py`. + +.. only:: latex + + A very similar example to this can be found in the examples section, + with the title "Deriving Exner Pressure and Air Temperature". + +.. _cube_maths_combining_units: + +Combining Units +--------------- + +It should be noted that when combining cubes by multiplication, division or +power operations, the resulting cube will have a unit which is an appropriate +combination of the constituent units. In the above example, since ``pressure`` +and ``p0`` have the same unit, then ``pressure / p0`` has a dimensionless +unit of ``'1'``. Since ``(pressure / p0)`` has a unit of ``'1'``, this does +not change under power operations and so +``( (pressure / p0) ** (287.05 / 1005) )`` also has unit ``1``. Multiplying +by a cube with unit ``'1'`` will preserve units, so the cube ``temperature`` +will be given the same units as are in ``pot_temperature``. 
It should be +noted that some combinations of units, particularly those involving power +operations, will not result in a valid unit and will cause the calculation +to fail. For example, if a cube ``a`` had units ``'m'`` then ``a ** 0.5`` +would result in an error since the square root of a meter has no meaningful +unit (if ``a`` had units ``'m2'`` then ``a ** 0.5`` would result in a cube +with units ``'m'``). + +Iris inherits units from `cf_units `_ +which in turn inherits from `UDUNITS `_. +As well as the units UDUNITS provides, cf units also provides the units +``'no-unit'`` and ``'unknown'``. A unit of ``'no-unit'`` means that the +associated data is not suitable for describing with a unit, cf units +considers ``'no-unit'`` unsuitable for combining and therefore any +arithmetic done on a cube with ``'no-unit'`` will fail. A unit of +``'unknown'`` means that the unit describing the associated data +cannot be determined. cf units and Iris will allow arithmetic on cubes +with a unit of ``'unknown'``, but the resulting cube will always have +a unit of ``'unknown'``. If a calculation is prevented because it would +result in inappropriate units, it may be forced by setting the units of +the original cubes to be ``'unknown'``. + diff --git a/docs/src/userguide/cube_statistics.rst b/docs/src/userguide/cube_statistics.rst new file mode 100644 index 0000000000..fb389a5229 --- /dev/null +++ b/docs/src/userguide/cube_statistics.rst @@ -0,0 +1,425 @@ +.. _cube-statistics: + +=============== +Cube Statistics +=============== + +.. seealso:: + + Relevant gallery example: + :ref:`sphx_glr_generated_gallery_general_plot_zonal_means.py` (Collapsing) + +.. _cube-statistics-collapsing: + +Collapsing Entire Data Dimensions +--------------------------------- + +.. testsetup:: collapsing + + import iris + filename = iris.sample_data_path('uk_hires.pp') + cube = iris.load_cube(filename, 'air_potential_temperature') + + import iris.analysis.cartography + cube.coord('grid_latitude').guess_bounds() + cube.coord('grid_longitude').guess_bounds() + grid_areas = iris.analysis.cartography.area_weights(cube) + + +In the :doc:`subsetting_a_cube` section we saw how to extract a subset of a +cube in order to reduce either its dimensionality or its resolution. +Instead of simply extracting a sub-region of the data, +we can produce statistical functions of the data values +across a particular dimension, +such as a 'mean over time' or 'minimum over latitude'. + +.. 
_cube-statistics_forecast_printout: + +For instance, suppose we have a cube: + + >>> import iris + >>> filename = iris.sample_data_path('uk_hires.pp') + >>> cube = iris.load_cube(filename, 'air_potential_temperature') + >>> print(cube) + air_potential_temperature / (K) (time: 3; model_level_number: 7; grid_latitude: 204; grid_longitude: 187) + Dimension coordinates: + time x - - - + model_level_number - x - - + grid_latitude - - x - + grid_longitude - - - x + Auxiliary coordinates: + forecast_period x - - - + level_height - x - - + sigma - x - - + surface_altitude - - x x + Derived coordinates: + altitude - x x x + Scalar coordinates: + forecast_reference_time 2009-11-19 04:00:00 + Attributes: + STASH m01s00i004 + source 'Data from Met Office Unified Model' + um_version '7.3' + + +In this case we have a 4 dimensional cube; +to mean the vertical (z) dimension down to a single valued extent +we can pass the coordinate name and the aggregation definition to the +:meth:`Cube.collapsed() ` method: + + >>> import iris.analysis + >>> vertical_mean = cube.collapsed('model_level_number', iris.analysis.MEAN) + >>> print(vertical_mean) + air_potential_temperature / (K) (time: 3; grid_latitude: 204; grid_longitude: 187) + Dimension coordinates: + time x - - + grid_latitude - x - + grid_longitude - - x + Auxiliary coordinates: + forecast_period x - - + surface_altitude - x x + Derived coordinates: + altitude - x x + Scalar coordinates: + forecast_reference_time 2009-11-19 04:00:00 + level_height 696.6666 m, bound=(0.0, 1393.3333) m + model_level_number 10, bound=(1, 19) + sigma 0.92292976, bound=(0.8458596, 1.0) + Cell methods: + 0 model_level_number: mean + Attributes: + STASH m01s00i004 + source 'Data from Met Office Unified Model' + um_version '7.3' + + +Similarly other analysis operators such as ``MAX``, ``MIN`` and ``STD_DEV`` +can be used instead of ``MEAN``, see :mod:`iris.analysis` for a full list +of currently supported operators. + +For an example of using this functionality, the +:ref:`sphx_glr_generated_gallery_meteorology_plot_hovmoller.py` +example found +in the gallery takes a zonal mean of an ``XYT`` cube by using the +``collapsed`` method with ``latitude`` and ``iris.analysis.MEAN`` as arguments. + +.. _cube-statistics-collapsing-average: + +Area Averaging +^^^^^^^^^^^^^^ + +Some operators support additional keywords to the ``cube.collapsed`` method. +For example, :func:`iris.analysis.MEAN ` supports +a weights keyword which can be combined with +:func:`iris.analysis.cartography.area_weights` to calculate an area average. + +Let's use the same data as was loaded in the previous example. +Since ``grid_latitude`` and ``grid_longitude`` were both point coordinates +we must guess bound positions for them +in order to calculate the area of the grid boxes:: + + import iris.analysis.cartography + cube.coord('grid_latitude').guess_bounds() + cube.coord('grid_longitude').guess_bounds() + grid_areas = iris.analysis.cartography.area_weights(cube) + +These areas can now be passed to the ``collapsed`` method as weights: + +.. 
doctest:: collapsing + + >>> new_cube = cube.collapsed(['grid_longitude', 'grid_latitude'], iris.analysis.MEAN, weights=grid_areas) + >>> print(new_cube) + air_potential_temperature / (K) (time: 3; model_level_number: 7) + Dimension coordinates: + time x - + model_level_number - x + Auxiliary coordinates: + forecast_period x - + level_height - x + sigma - x + Derived coordinates: + altitude - x + Scalar coordinates: + forecast_reference_time 2009-11-19 04:00:00 + grid_latitude 1.5145501 degrees, bound=(0.13755022, 2.89155) degrees + grid_longitude 358.74948 degrees, bound=(357.48724, 360.01172) degrees + surface_altitude 399.625 m, bound=(-14.0, 813.25) m + Cell methods: + 0 grid_longitude: grid_latitude: mean + Attributes: + STASH m01s00i004 + source 'Data from Met Office Unified Model' + um_version '7.3' + +Several examples of area averaging exist in the gallery which may be of interest, +including an example on taking a :ref:`global area-weighted mean +`. + +In addition to plain arrays, weights can also be given as cubes or (names of) +:meth:`~iris.cube.Cube.coords`, :meth:`~iris.cube.Cube.cell_measures`, or +:meth:`~iris.cube.Cube.ancillary_variables`. +This has the advantage of correct unit handling, e.g., for area-weighted sums +the units of the resulting cube are multiplied by an area unit: + +.. doctest:: collapsing + + >>> from iris.coords import CellMeasure + >>> cell_areas = CellMeasure( + ... grid_areas, + ... standard_name='cell_area', + ... units='m2', + ... measure='area', + ... ) + >>> cube.add_cell_measure(cell_areas, (0, 1, 2, 3)) + >>> area_weighted_sum = cube.collapsed( + ... ['grid_longitude', 'grid_latitude'], + ... iris.analysis.SUM, + ... weights='cell_area' + ... ) + >>> print(area_weighted_sum) + air_potential_temperature / (m2.K) (time: 3; model_level_number: 7) + Dimension coordinates: + time x - + model_level_number - x + Auxiliary coordinates: + forecast_period x - + level_height - x + sigma - x + Derived coordinates: + altitude - x + Scalar coordinates: + forecast_reference_time 2009-11-19 04:00:00 + grid_latitude 1.5145501 degrees, bound=(0.13755022, 2.89155) degrees + grid_longitude 358.74948 degrees, bound=(357.48724, 360.01172) degrees + surface_altitude 399.625 m, bound=(-14.0, 813.25) m + Cell methods: + 0 grid_longitude: grid_latitude: sum + Attributes: + STASH m01s00i004 + source 'Data from Met Office Unified Model' + um_version '7.3' + +.. _cube-statistics-aggregated-by: + +Partially Reducing Data Dimensions +---------------------------------- + +Instead of completely collapsing a dimension, other methods can be applied +to reduce or filter the number of data points of a particular dimension. + + +Aggregation of Grouped Data +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The :meth:`Cube.aggregated_by ` operation +combines data for all points with the same value of a given coordinate. +To do this, you need a coordinate whose points take on only a limited set +of different values -- the *number* of these then determines the size of the +reduced dimension. +The :mod:`iris.coord_categorisation` module can be used to make such +'categorical' coordinates out of ordinary ones: The most common use is +to aggregate data over regular *time intervals*, +such as by calendar month or day of the week. 
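+
+As a taster, aggregating such a time-series into a mean for each calendar
+month might look like the following sketch (not doctested here; it simply
+combines :func:`iris.coord_categorisation.add_month` with the
+:meth:`~iris.cube.Cube.aggregated_by` operation described above)::
+
+    import iris
+    import iris.analysis
+    import iris.coord_categorisation
+
+    cube = iris.load_cube(iris.sample_data_path('ostia_monthly.nc'),
+                          'surface_temperature')
+    # Add a categorical 'month' coordinate derived from the 'time' coordinate,
+    # then combine all data points that share the same month value.
+    iris.coord_categorisation.add_month(cube, 'time', name='month')
+    monthly_mean = cube.aggregated_by('month', iris.analysis.MEAN)
+
+The worked example that follows uses exactly the same pattern, with seasons
+instead of months.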
+ +For example, let's create two new coordinates on the cube +to represent the climatological seasons and the season year respectively:: + + import iris + import iris.coord_categorisation + + filename = iris.sample_data_path('ostia_monthly.nc') + cube = iris.load_cube(filename, 'surface_temperature') + + iris.coord_categorisation.add_season(cube, 'time', name='clim_season') + iris.coord_categorisation.add_season_year(cube, 'time', name='season_year') + + + +.. note:: + + The 'season year' is not the same as year number, because (e.g.) the months + Dec11, Jan12 + Feb12 all belong to 'DJF-12'. + See :meth:`iris.coord_categorisation.add_season_year`. + + +.. testsetup:: aggregation + + import datetime + import iris + + filename = iris.sample_data_path('ostia_monthly.nc') + cube = iris.load_cube(filename, 'surface_temperature') + + import iris.coord_categorisation + iris.coord_categorisation.add_season(cube, 'time', name='clim_season') + iris.coord_categorisation.add_season_year(cube, 'time', name='season_year') + + annual_seasonal_mean = cube.aggregated_by( + ['clim_season', 'season_year'], + iris.analysis.MEAN) + + +Printing this cube now shows that two extra coordinates exist on the cube: + +.. doctest:: aggregation + + >>> print(cube) + surface_temperature / (K) (time: 54; latitude: 18; longitude: 432) + Dimension coordinates: + time x - - + latitude - x - + longitude - - x + Auxiliary coordinates: + clim_season x - - + forecast_reference_time x - - + season_year x - - + Scalar coordinates: + forecast_period 0 hours + Cell methods: + 0 month: year: mean + Attributes: + Conventions 'CF-1.5' + STASH m01s00i024 + + +These two coordinates can now be used to aggregate by season and climate-year: + +.. doctest:: aggregation + + >>> annual_seasonal_mean = cube.aggregated_by( + ... ['clim_season', 'season_year'], + ... iris.analysis.MEAN) + >>> print(repr(annual_seasonal_mean)) + + +The primary change in the cube is that the cube's data has been +reduced in the 'time' dimension by aggregation (taking means, in this case). +This has collected together all data points with the same values of season and +season-year. +The results are now indexed by the 19 different possible values of season and +season-year in a new, reduced 'time' dimension. + +We can see this by printing the first 10 values of season+year +from the original cube: These points are individual months, +so adjacent ones are often in the same season: + +.. doctest:: aggregation + :options: +NORMALIZE_WHITESPACE + + >>> for season, year in zip(cube.coord('clim_season')[:10].points, + ... cube.coord('season_year')[:10].points): + ... print(season + ' ' + str(year)) + mam 2006 + mam 2006 + jja 2006 + jja 2006 + jja 2006 + son 2006 + son 2006 + son 2006 + djf 2007 + djf 2007 + +Compare this with the first 10 values of the new cube's coordinates: +All the points now have distinct season+year values: + +.. doctest:: aggregation + :options: +NORMALIZE_WHITESPACE + + >>> for season, year in zip( + ... annual_seasonal_mean.coord('clim_season')[:10].points, + ... annual_seasonal_mean.coord('season_year')[:10].points): + ... print(season + ' ' + str(year)) + mam 2006 + jja 2006 + son 2006 + djf 2007 + mam 2007 + jja 2007 + son 2007 + djf 2008 + mam 2008 + jja 2008 + +Because the original data started in April 2006 we have some incomplete seasons +(e.g. there were only two months worth of data for 'mam-2006'). 
+In this case we can fix this by removing all of the resultant 'times' which +do not cover a three month period (note: judged here as > 3*28 days): + +.. doctest:: aggregation + + >>> tdelta_3mth = datetime.timedelta(hours=3*28*24.0) + >>> spans_three_months = lambda t: (t.bound[1] - t.bound[0]) > tdelta_3mth + >>> three_months_bound = iris.Constraint(time=spans_three_months) + >>> full_season_means = annual_seasonal_mean.extract(three_months_bound) + >>> full_season_means + + +The final result now represents the seasonal mean temperature for 17 seasons +from jja-2006 to jja-2010: + +.. doctest:: aggregation + :options: +NORMALIZE_WHITESPACE + + >>> for season, year in zip(full_season_means.coord('clim_season').points, + ... full_season_means.coord('season_year').points): + ... print(season + ' ' + str(year)) + jja 2006 + son 2006 + djf 2007 + mam 2007 + jja 2007 + son 2007 + djf 2008 + mam 2008 + jja 2008 + son 2008 + djf 2009 + mam 2009 + jja 2009 + son 2009 + djf 2010 + mam 2010 + jja 2010 + +Moreover, :meth:`Cube.aggregated_by ` supports +weighted aggregation. +For example, this is helpful for an aggregation over a monthly time +coordinate that consists of months with different numbers of days. +Similar to :meth:`Cube.collapsed `, weights can be +given as arrays, cubes, or as (names of) :meth:`~iris.cube.Cube.coords`, +:meth:`~iris.cube.Cube.cell_measures`, or +:meth:`~iris.cube.Cube.ancillary_variables`. +When weights are not given as arrays, units are correctly handled for weighted +sums, i.e., the original unit of the cube is multiplied by the units of the +weights. +The following example shows a weighted sum (notice the change of the units): + +.. doctest:: aggregation + + >>> from iris.coords import AncillaryVariable + >>> time_weights = AncillaryVariable( + ... cube.coord("time").bounds[:, 1] - cube.coord("time").bounds[:, 0], + ... long_name="Time Weights", + ... units="hours", + ... ) + >>> cube.add_ancillary_variable(time_weights, 0) + >>> seasonal_sum = cube.aggregated_by("clim_season", iris.analysis.SUM, weights="Time Weights") + >>> print(seasonal_sum) + surface_temperature / (3600 s.K) (-- : 4; latitude: 18; longitude: 432) + Dimension coordinates: + latitude - x - + longitude - - x + Auxiliary coordinates: + clim_season x - - + forecast_reference_time x - - + season_year x - - + time x - - + Scalar coordinates: + forecast_period 0 hours + Cell methods: + 0 month: year: mean + 1 clim_season: sum + Attributes: + Conventions 'CF-1.5' + STASH m01s00i024 diff --git a/docs/src/userguide/glossary.rst b/docs/src/userguide/glossary.rst new file mode 100644 index 0000000000..6ab93125bd --- /dev/null +++ b/docs/src/userguide/glossary.rst @@ -0,0 +1,214 @@ +.. include:: ../common_links.inc + +.. _glossary: + +Glossary +============= + +.. glossary:: + + Cartopy + A python package for producing maps, and other geospatial data. + Allows plotting on these maps, over a range of projections. + + | **Related:** :term:`Matplotlib` + | **More information:** `CartoPy Site `_ + | + + CF Conventions + Rules for storing meteorological Climate and Forecast data in + :term:`NetCDF Format` files, defining a standard metadata format to + describe what the data is. + This also forms the data model which iris is based on. + + | **Related:** :term:`NetCDF Format` + | **More information:** `CF Conventions `_ + | + + Coordinate + A container for data points, comes in three main flavours. + + - Dimensional Coordinate - + A coordinate that describes a single data dimension of a cube. 
+          They can only contain numerical values, in a sorted order (ascending
+          or descending).
+        - Auxiliary Coordinate -
+          A coordinate that can map to multiple data dimensions. Can
+          contain any type of data.
+        - Scalar Coordinate -
+          A coordinate that is not mapped to any data dimension, instead
+          representing the cube as a whole.
+
+        | **Related:** :term:`Cube`
+        | **More information:** :doc:`iris_cubes`
+        |
+
+    Cube
+        Cubes are the main method of storing data in Iris. A cube can consist of:
+
+        - Array of :term:`Phenomenon` Data (Required)
+        - :term:`Coordinates `
+        - :term:`Standard Name`
+        - :term:`Long Name`
+        - :term:`Unit`
+        - :term:`Cell Methods `
+        - :term:`Coordinate Factories `
+
+        | **Related:** :term:`NumPy`
+        | **More information:** :doc:`iris_cubes`
+        |
+
+    Cell Method
+        A cell method records that a cube's data has been derived from a
+        past statistical operation, such as a
+        MEAN or SUM operation.
+
+        | **Related:** :term:`Cube`
+        | **More information:** :doc:`iris_cubes`
+        |
+
+    Coordinate Factory
+        A coordinate factory derives coordinates (sometimes referred to as
+        derived coordinates) from the values of existing coordinates.
+        E.g. a hybrid height factory might use "height above sea level"
+        and "height at ground level" coordinate data to calculate a
+        "height above ground level" coordinate.
+
+        | **Related:** :term:`Cube`
+        | **More information:** :doc:`iris_cubes`
+        |
+
+
+    Dask
+        A data analytics python library. Iris predominantly uses Dask arrays:
+        collections of NumPy-like arrays that are operated on in batches,
+        so that not all data is in RAM at once.
+
+        | **Related:** :term:`Lazy Data` **|** :term:`NumPy`
+        | **More information:** :doc:`real_and_lazy_data`
+        |
+
+    Fields File (FF) Format
+        A meteorological file format, the output of the Unified Model.
+
+        | **Related:** :term:`GRIB Format`
+          **|** :term:`Post Processing (PP) Format` **|** :term:`NetCDF Format`
+        | **More information:** `Unified Model `_
+        |
+
+    GRIB Format
+        A WMO-standard meteorological file format.
+
+        | **Related:** :term:`Fields File (FF) Format`
+          **|** :term:`Post Processing (PP) Format` **|** :term:`NetCDF Format`
+        | **More information:** `GRIB 1 User Guide `_
+          **|** `GRIB 2 User Guide.pdf `_
+        |
+
+    Lazy Data
+        Data stored on the hard drive, and then temporarily loaded into RAM in
+        batches when needed. This allows for lower memory usage and faster
+        performance, thanks to parallel processing.
+
+        | **Related:** :term:`Dask` **|** :term:`Real Data`
+        | **More information:** :doc:`real_and_lazy_data`
+        |
+
+    Long Name
+        A name describing a :term:`phenomenon`, not limited to the same
+        restrictions as a :term:`standard name`.
+
+        | **Related:** :term:`Standard Name` **|** :term:`Cube`
+        | **More information:** :doc:`iris_cubes`
+        |
+
+    Matplotlib
+        A python package for plotting and projecting data in a wide variety
+        of formats.
+
+        | **Related:** :term:`CartoPy` **|** :term:`NumPy`
+        | **More information:** `matplotlib`_
+        |
+
+    Metadata
+        The information which describes a phenomenon.
+        Within Iris specifically, it is all the information which
+        distinguishes one phenomenon from another,
+        e.g. :term:`units ` or :term:`Cell Methods `.
+
+        | **Related:** :term:`Phenomenon` **|** :term:`Cube`
+        | **More information:** :doc:`../further_topics/metadata`
+        |
+
+    NetCDF Format
+        A flexible file format for storing multi-dimensional array-like data.
+        When Iris loads this format, it also especially recognises and
+        interprets data encoded according to the :term:`CF Conventions`.
+ + __ `NetCDF4`_ + + | **Related:** :term:`Fields File (FF) Format` + **|** :term:`GRIB Format` **|** :term:`Post Processing (PP) Format` + | **More information:** `NetCDF-4 Python Git`__ + | + + NumPy + A mathematical Python library, predominantly based around + multi-dimensional arrays. + + | **Related:** :term:`Dask` **|** :term:`Cube` + **|** :term:`Xarray` + | **More information:** `NumPy.org `_ + | + + Phenomenon + The primary data which is measured, usually within a cube, e.g. + air temperature. + + | **Related:** :term:`Metadata` + **|** :term:`Standard Name` **|** :term:`Cube` + | **More information:** :doc:`iris_cubes` + | + + Post Processing (PP) Format + A meteorological file format, created from a post processed + :term:`Fields File (FF) Format`. + + | **Related:** :term:`GRIB Format` **|** :term:`NetCDF Format` + | **More information:** `PP Wikipedia Page `_ + | + + Real Data + Data that has been loaded into RAM, as opposed to sitting + on the hard drive. + + | **Related:** :term:`Lazy Data` **|** :term:`NumPy` + | **More information:** :doc:`real_and_lazy_data` + | + + Standard Name + A name describing a :term:`phenomenon`, one from a fixed list + defined at `CF Standard Names `_. + + | **Related:** :term:`Long Name` **|** :term:`Cube` + | **More information:** :doc:`iris_cubes` + | + + Unit + The unit with which the :term:`phenomenon` is measured e.g. m / sec. + + | **Related:** :term:`Cube` + | **More information:** :doc:`iris_cubes` + | + + Xarray + A python library for sophisticated labelled multi-dimensional operations. + Has a broader scope than Iris - it is not focused on meteorological data. + + | **Related:** :term:`NumPy` + | **More information:** `Xarray Documentation `_ + | + +---- + +`To top `_ diff --git a/docs/src/userguide/index.rst b/docs/src/userguide/index.rst new file mode 100644 index 0000000000..d986a986ad --- /dev/null +++ b/docs/src/userguide/index.rst @@ -0,0 +1,45 @@ +.. _user_guide_index: +.. _user_guide_introduction: + +User Guide +========== + +If you are reading this user guide for the first time it is strongly +recommended that you read the user guide fully before experimenting with your +own data files. + +Much of the content has supplementary links to the reference documentation; +you will not need to follow these links in order to understand the guide but +they may serve as a useful reference for future exploration. + +.. only:: html + + Since later pages depend on earlier ones, try reading this user guide + sequentially using the ``next`` and ``previous`` links at the bottom + of each page. + +.. note:: + + There is also useful learning material held in the + https://github.com/scitools-classroom repo, including tutorials, courses + and presentations. + + +.. toctree:: + :maxdepth: 2 + + iris_cubes + loading_iris_cubes + saving_iris_cubes + navigating_a_cube + subsetting_a_cube + real_and_lazy_data + plotting_a_cube + interpolation_and_regridding + merge_and_concat + cube_statistics + cube_maths + citation + code_maintenance + glossary + ../further_topics/index diff --git a/docs/src/userguide/interpolation_and_regridding.rst b/docs/src/userguide/interpolation_and_regridding.rst new file mode 100644 index 0000000000..571c43bf0e --- /dev/null +++ b/docs/src/userguide/interpolation_and_regridding.rst @@ -0,0 +1,460 @@ +.. _interpolation_and_regridding: + +.. 
testsetup:: * + + import numpy as np + import iris + import warnings + warnings.simplefilter('ignore') + +================================= +Cube Interpolation and Regridding +================================= + +Iris provides powerful cube-aware interpolation and regridding functionality, +exposed through Iris cube methods. This functionality is provided by building +upon existing interpolation schemes implemented by SciPy. + +In Iris we refer to the available types of interpolation and regridding as +`schemes`. The following are the interpolation schemes that are currently +available in Iris: + +* linear interpolation (:class:`iris.analysis.Linear`), and +* nearest-neighbour interpolation (:class:`iris.analysis.Nearest`). + +The following are the regridding schemes that are currently available in Iris: + +* linear regridding (:class:`iris.analysis.Linear`), +* nearest-neighbour regridding (:class:`iris.analysis.Nearest` and :class:`iris.analysis.UnstructuredNearest`), +* point in cell regridding (:class:`iris.analysis.PointInCell`) and +* area-weighted regridding (:class:`iris.analysis.AreaWeighted`, first-order conservative). + +The linear, nearest-neighbor, and area-weighted regridding schemes support +lazy regridding, i.e. if the source cube has lazy data, the resulting cube +will also have lazy data. +See :doc:`real_and_lazy_data` for an introduction to lazy data. +See :doc:`../further_topics/which_regridder_to_use` for a more in depth overview of the different regridders. + + +.. _interpolation: + +Interpolation +------------- + +Interpolating a cube is achieved with the :meth:`~iris.cube.Cube.interpolate` +method. This method expects two arguments: + +#. the sample points to interpolate, and +#. the interpolation scheme to use. + +The result is a new cube, interpolated at the sample points. + +Sample points must be defined as an iterable of ``(coord, value(s))`` pairs. +The `coord` argument can be either a coordinate name or coordinate instance. +The specified coordinate must exist on the cube being interpolated! For example: + +* coordinate names and scalar sample points: ``[('latitude', 51.48), ('longitude', 0)]``, +* a coordinate instance and a scalar sample point: ``[(cube.coord('latitude'), 51.48)]``, and +* a coordinate name and a NumPy array of sample points: ``[('longitude', np.linspace(-11, 2, 14))]`` + +are all examples of valid sample points. + +The values for coordinates that correspond to date/times can be supplied as +datetime.datetime or cftime.datetime instances, +e.g. ``[('time', datetime.datetime(2009, 11, 19, 10, 30))]``). 
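+
+As a minimal sketch (not doctested here; it assumes an existing cube named
+``cube`` with a ``time`` dimension whose time coordinate uses a standard
+calendar)::
+
+    import datetime
+
+    import iris.analysis
+
+    # Interpolate onto a single date/time instant along the 'time' axis.
+    sample_points = [('time', datetime.datetime(2009, 11, 19, 10, 30))]
+    result = cube.interpolate(sample_points, iris.analysis.Linear())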
+ +Let's take the air temperature cube we've seen previously: + + >>> air_temp = iris.load_cube(iris.sample_data_path('air_temp.pp')) + >>> print(air_temp) + air_temperature / (K) (latitude: 73; longitude: 96) + Dimension coordinates: + latitude x - + longitude - x + Scalar coordinates: + forecast_period 6477 hours, bound=(-28083.0, 6477.0) hours + forecast_reference_time 1998-03-01 03:00:00 + pressure 1000.0 hPa + time 1998-12-01 00:00:00, bound=(1994-12-01 00:00:00, 1998-12-01 00:00:00) + Cell methods: + 0 time: mean within years + 1 time: mean over years + Attributes: + STASH m01s16i203 + source 'Data from Met Office Unified Model' + +We can interpolate specific values from the coordinates of the cube: + + >>> sample_points = [('latitude', 51.48), ('longitude', 0)] + >>> print(air_temp.interpolate(sample_points, iris.analysis.Linear())) + air_temperature / (K) (scalar cube) + Scalar coordinates: + forecast_period 6477 hours, bound=(-28083.0, 6477.0) hours + forecast_reference_time 1998-03-01 03:00:00 + latitude 51.48 degrees + longitude 0 degrees + pressure 1000.0 hPa + time 1998-12-01 00:00:00, bound=(1994-12-01 00:00:00, 1998-12-01 00:00:00) + Cell methods: + 0 time: mean within years + 1 time: mean over years + Attributes: + STASH m01s16i203 + source 'Data from Met Office Unified Model' + +As we can see, the resulting cube is scalar and has longitude and latitude coordinates with +the values defined in our sample points. + +It isn't necessary to specify sample points for every dimension, only those that you +wish to interpolate over: + + >>> result = air_temp.interpolate([('longitude', 0)], iris.analysis.Linear()) + >>> print('Original: ' + air_temp.summary(shorten=True)) + Original: air_temperature / (K) (latitude: 73; longitude: 96) + >>> print('Interpolated: ' + result.summary(shorten=True)) + Interpolated: air_temperature / (K) (latitude: 73) + +The sample points for a coordinate can be an array of values. When multiple coordinates are +provided with arrays instead of scalar sample points, the coordinates on the resulting cube +will be orthogonal: + + >>> sample_points = [('longitude', np.linspace(-11, 2, 14)), + ... ('latitude', np.linspace(48, 60, 13))] + >>> result = air_temp.interpolate(sample_points, iris.analysis.Linear()) + >>> print(result.summary(shorten=True)) + air_temperature / (K) (latitude: 13; longitude: 14) + + +Interpolating Non-Horizontal Coordinates +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Interpolation in Iris is not limited to horizontal-spatial coordinates - any +coordinate satisfying the prerequisites of the chosen scheme may be interpolated +over. + +For instance, the :class:`iris.analysis.Linear` scheme requires 1D numeric, +monotonic, coordinates. 
Supposing we have a single column cube such as +the one defined below: + + >>> cube = iris.load_cube(iris.sample_data_path('hybrid_height.nc'), 'air_potential_temperature') + >>> column = cube[:, 0, 0] + >>> print(column.summary(shorten=True)) + air_potential_temperature / (K) (model_level_number: 15) + +This cube has a "hybrid-height" vertical coordinate system, meaning that the vertical +coordinate is unevenly spaced in altitude: + + >>> print(column.coord('altitude').points) + [ 418.69836 434.5705 456.7928 485.3665 520.2933 561.5752 + 609.2145 663.2141 723.57697 790.30664 863.4072 942.8823 + 1028.737 1120.9764 1219.6051 ] + +We could regularise the vertical coordinate by defining 10 equally spaced altitude +sample points between 400 and 1250 and interpolating our vertical coordinate onto +these sample points: + + >>> sample_points = [('altitude', np.linspace(400, 1250, 10))] + >>> new_column = column.interpolate(sample_points, iris.analysis.Linear()) + >>> print(new_column.summary(shorten=True)) + air_potential_temperature / (K) (model_level_number: 10) + +Let's look at the original data, the interpolation line and +the new data in a plot. This will help us to see what is going on: + +.. plot:: userguide/regridding_plots/interpolate_column.py + +The red diamonds on the extremes of the altitude values show that we have +extrapolated data beyond the range of the original data. In some cases this is +desirable but in other cases it is not. For example, this column defines +a surface altitude value of 414m, so extrapolating an "air potential temperature" +at 400m makes little physical sense in this case. + +We can control the extrapolation mode when defining the interpolation scheme. +Controlling the extrapolation mode allows us to avoid situations like the above where +extrapolating values makes little physical sense. + +The extrapolation mode is controlled by the ``extrapolation_mode`` keyword. +For the available interpolation schemes available in Iris, the ``extrapolation_mode`` +keyword must be one of: + +* ``extrapolate`` -- the extrapolation points will be calculated by extending the gradient of the closest two points, +* ``error`` -- a ValueError exception will be raised, notifying an attempt to extrapolate, +* ``nan`` -- the extrapolation points will be be set to NaN, +* ``mask`` -- the extrapolation points will always be masked, even if the source data is not a MaskedArray, or +* ``nanmask`` -- if the source data is a MaskedArray the extrapolation points will be masked. Otherwise they will be set to NaN. + +Using an extrapolation mode is achieved by constructing an interpolation scheme +with the extrapolation mode keyword set as required. The constructed scheme +is then passed to the :meth:`~iris.cube.Cube.interpolate` method. +For example, to mask values that lie beyond the range of the original data: + + >>> scheme = iris.analysis.Linear(extrapolation_mode='mask') + >>> new_column = column.interpolate(sample_points, scheme) + >>> print(new_column.coord('altitude').points) + [-- 494.44451904296875 588.888916015625 683.333251953125 777.77783203125 + 872.2222290039062 966.666748046875 1061.111083984375 1155.555419921875 --] + + +.. _caching_an_interpolator: + +Caching an Interpolator +^^^^^^^^^^^^^^^^^^^^^^^ + +If you need to interpolate a cube on multiple sets of sample points you can +'cache' an interpolator to be used for each of these interpolations. 
This can +shorten the execution time of your code as the most computationally +intensive part of an interpolation is setting up the interpolator. + +To cache an interpolator you must set up an interpolator scheme and call the +scheme's interpolator method. The interpolator method takes as arguments: + +#. a cube to be interpolated, and +#. an iterable of coordinate names or coordinate instances of the coordinates that are to be interpolated over. + +For example: + + >>> air_temp = iris.load_cube(iris.sample_data_path('air_temp.pp')) + >>> interpolator = iris.analysis.Nearest().interpolator(air_temp, ['latitude', 'longitude']) + +When this cached interpolator is called you must pass it an iterable of sample points +that have the same form as the iterable of coordinates passed to the constructor. +So, to use the cached interpolator defined above: + + >>> latitudes = np.linspace(48, 60, 13) + >>> longitudes = np.linspace(-11, 2, 14) + >>> for lat, lon in zip(latitudes, longitudes): + ... result = interpolator([lat, lon]) + +In each case ``result`` will be a cube interpolated from the ``air_temp`` cube we +passed to interpolator. + +Note that you must specify the required extrapolation mode when setting up the cached interpolator. +For example:: + + >>> interpolator = iris.analysis.Nearest(extrapolation_mode='nan').interpolator(cube, coords) + + +.. _regridding: + +Regridding +---------- + +Regridding is conceptually a very similar process to interpolation in Iris. +The primary difference is that interpolation is based on sample points, while +regridding is based on the **horizontal** grid of *another cube*. + +Regridding a cube is achieved with the :meth:`cube.regrid() ` method. +This method expects two arguments: + +#. *another cube* that defines the target grid onto which the cube should be regridded, and +#. the regridding scheme to use. + +.. note:: + + Regridding is a common operation needed to allow comparisons of data on different grids. + The powerful mapping functionality provided by cartopy, however, means that regridding + is often not necessary if performed just for visualisation purposes. + +Let's load two cubes that have different grids and coordinate systems: + + >>> global_air_temp = iris.load_cube(iris.sample_data_path('air_temp.pp')) + >>> rotated_psl = iris.load_cube(iris.sample_data_path('rotated_pole.nc')) + +We can visually confirm that they are on different grids by plotting the two cubes: + +.. plot:: userguide/regridding_plots/regridding_plot.py + +Let's regrid the ``global_air_temp`` cube onto a rotated pole grid +using a linear regridding scheme. To achieve this we pass the ``rotated_psl`` +cube to the regridder to supply the target grid to regrid the ``global_air_temp`` +cube onto: + + >>> rotated_air_temp = global_air_temp.regrid(rotated_psl, iris.analysis.Linear()) + +.. plot:: userguide/regridding_plots/regridded_to_rotated.py + +We could regrid the pressure values onto the global grid, but this will involve +some form of extrapolation. As with interpolation, we can control the extrapolation +mode when defining the regridding scheme. + +For the available regridding schemes in Iris, the ``extrapolation_mode`` keyword +must be one of: + +* ``extrapolate`` -- + + * for :class:`~iris.analysis.Linear` the extrapolation points will be calculated by extending the gradient of the closest two points. + * for :class:`~iris.analysis.Nearest` the extrapolation points will take their value from the nearest source point. 
+
+* ``nan`` -- the extrapolation points will be set to NaN.
+* ``error`` -- a ValueError exception will be raised, notifying an attempt to extrapolate.
+* ``mask`` -- the extrapolation points will always be masked, even if the source data is not a MaskedArray.
+* ``nanmask`` -- if the source data is a MaskedArray the extrapolation points will be masked. Otherwise they will be set to NaN.
+
+The ``rotated_psl`` cube is defined on a limited area rotated pole grid. If we
+regridded the ``rotated_psl`` cube onto the global grid as defined by the
+``global_air_temp`` cube, any linearly extrapolated values would quickly become
+dominant and highly inaccurate. We can control this behaviour by defining the
+``extrapolation_mode`` in the constructor of the regridding scheme to mask
+values that lie outside of the domain of the rotated pole grid:
+
+    >>> scheme = iris.analysis.Linear(extrapolation_mode='mask')
+    >>> global_psl = rotated_psl.regrid(global_air_temp, scheme)
+
+.. plot:: userguide/regridding_plots/regridded_to_global.py
+
+Notice that although we can still see the approximate shape of the rotated pole
+grid, the cells have now become rectangular in a plate carrée (equirectangular)
+projection. The spatial grid of the resulting cube is now truly global, with a
+large proportion of the data being masked.
+
+Area-Weighted Regridding
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+It is often the case that a point-based regridding scheme (such as
+:class:`iris.analysis.Linear` or :class:`iris.analysis.Nearest`) is not
+appropriate when you need to conserve quantities across a regrid. The
+:class:`iris.analysis.AreaWeighted` scheme is less general than
+:class:`~iris.analysis.Linear` or :class:`~iris.analysis.Nearest`, but is a
+conservative regridding scheme, meaning that the area-weighted total is
+approximately preserved across grids.
+
+With the :class:`~iris.analysis.AreaWeighted` regridding scheme, each target grid-box's
+data is computed as a weighted mean of all grid-boxes from the source grid. The weighting
+for any given target grid-box is the area of the intersection with each of the
+source grid-boxes. This scheme performs well when regridding from a high
+resolution source grid to a lower resolution target grid, since all source data
+points will be accounted for in the target grid.
+
+Let's demonstrate this with the global air temperature cube we saw previously,
+along with a limited area cube containing total concentration of volcanic ash:
+
+    >>> global_air_temp = iris.load_cube(iris.sample_data_path('air_temp.pp'))
+    >>> print(global_air_temp.summary(shorten=True))
+    air_temperature / (K) (latitude: 73; longitude: 96)
+    >>>
+    >>> regional_ash = iris.load_cube(iris.sample_data_path('NAME_output.txt'))
+    >>> regional_ash = regional_ash.collapsed('flight_level', iris.analysis.SUM)
+    >>> print(regional_ash.summary(shorten=True))
+    VOLCANIC_ASH_AIR_CONCENTRATION / (g/m3) (latitude: 214; longitude: 584)
+
+One of the key limitations of the :class:`~iris.analysis.AreaWeighted`
+regridding scheme is that the two input grids must be defined in the same
+coordinate system as each other. Both input grids must also contain monotonic,
+bounded, 1D spatial coordinates.
+
+.. note::
+
+    The :class:`~iris.analysis.AreaWeighted` regridding scheme requires spatial
+    areas, therefore the longitude and latitude coordinates must be bounded.
+ If the longitude and latitude bounds are not defined in the cube we can + guess the bounds based on the coordinates' point values: + + >>> global_air_temp.coord('longitude').guess_bounds() + >>> global_air_temp.coord('latitude').guess_bounds() + +Using NumPy's masked array module we can mask any data that falls below a meaningful +concentration: + + >>> regional_ash.data = np.ma.masked_less(regional_ash.data, 5e-6) + +Finally, we can regrid the data using the :class:`~iris.analysis.AreaWeighted` +regridding scheme: + + >>> scheme = iris.analysis.AreaWeighted(mdtol=0.5) + >>> global_ash = regional_ash.regrid(global_air_temp, scheme) + >>> print(global_ash.summary(shorten=True)) + VOLCANIC_ASH_AIR_CONCENTRATION / (g/m3) (latitude: 73; longitude: 96) + +Note that the :class:`~iris.analysis.AreaWeighted` regridding scheme allows us +to define a missing data tolerance (``mdtol``), which specifies the tolerated +fraction of masked data in any given target grid-box. If the fraction of masked +data within a target grid-box exceeds this value, the data in this target +grid-box will be masked in the result. + +The fraction of masked data is calculated based on the area of masked source +grid-boxes that overlaps with each target grid-box. Defining an ``mdtol`` in the +:class:`~iris.analysis.AreaWeighted` regridding scheme allows fine control +of masked data tolerance. It is worth remembering that defining an ``mdtol`` of +anything other than 1 will prevent the scheme from being fully conservative, as +some data will be disregarded if it lies close to masked data. + +To visualise the above regrid, let's plot the original data, along with 3 distinct +``mdtol`` values to compare the result: + +.. plot:: userguide/regridding_plots/regridded_to_global_area_weighted.py + + +.. _caching_a_regridder: + +Caching a Regridder +^^^^^^^^^^^^^^^^^^^ + +If you need to regrid multiple cubes with a common source grid onto a common +target grid you can 'cache' a regridder to be used for each of these regrids. +This can shorten the execution time of your code as the most computationally +intensive part of a regrid is setting up the regridder. + +To cache a regridder you must set up a regridder scheme and call the +scheme's regridder method. The regridder method takes as arguments: + +#. a cube (that is to be regridded) defining the source grid, and +#. a cube defining the target grid to regrid the source cube to. + +For example: + + >>> global_air_temp = iris.load_cube(iris.sample_data_path('air_temp.pp')) + >>> rotated_psl = iris.load_cube(iris.sample_data_path('rotated_pole.nc')) + >>> regridder = iris.analysis.Nearest().regridder(global_air_temp, rotated_psl) + +When this cached regridder is called you must pass it a cube on the same grid +as the source grid cube (in this case ``global_air_temp``) that is to be +regridded to the target grid. For example:: + + >>> for cube in list_of_cubes_on_source_grid: + ... result = regridder(cube) + +In each case ``result`` will be the input cube regridded to the grid defined by +the target grid cube (in this case ``rotated_psl``) that we used to define the +cached regridder. + +Regridding Lazy Data +^^^^^^^^^^^^^^^^^^^^ + +If you are working with large cubes, especially when you are regridding to a +high resolution target grid, you may run out of memory when trying to +regrid a cube. 
When this happens, make sure the input cube has lazy data + + >>> air_temp = iris.load_cube(iris.sample_data_path('A1B_north_america.nc')) + >>> air_temp + + >>> air_temp.has_lazy_data() + True + +and the regridding scheme supports lazy data. All regridding schemes described +here support lazy data. If you still run out of memory even while using lazy +data, inspect the +`chunks `__ +: + + >>> air_temp.lazy_data().chunks + ((240,), (37,), (49,)) + +The cube above consist of a single chunk, because it is fairly small. For +larger cubes, iris will automatically create chunks of an optimal size when +loading the data. However, because regridding to a high resolution grid +may dramatically increase the size of the data, the automatically chosen +chunks might be too large. + +As an example of how to solve this, we could manually re-chunk the time +dimension, to regrid it in 8 chunks of 30 timesteps at a time: + + >>> air_temp.data = air_temp.lazy_data().rechunk([30, None, None]) + >>> air_temp.lazy_data().chunks + ((30, 30, 30, 30, 30, 30, 30, 30), (37,), (49,)) + +Assuming that Dask is configured such that it processes only a few chunks of +the data array at a time, this will further reduce memory use. + +Note that chunking in the horizontal dimensions is not supported by the +regridding schemes. Chunks in these dimensions will automatically be combined +before regridding. diff --git a/docs/src/userguide/iris_cubes.rst b/docs/src/userguide/iris_cubes.rst new file mode 100644 index 0000000000..03b5093efc --- /dev/null +++ b/docs/src/userguide/iris_cubes.rst @@ -0,0 +1,228 @@ +.. _iris_data_structures: + +==================== +Iris Data Structures +==================== + +The top level object in Iris is called a cube. A cube contains data and +metadata about a phenomenon. + +In Iris, a cube is an interpretation of the *Climate and Forecast (CF) +Metadata Conventions* whose purpose is to: + +.. card:: + + *require conforming datasets to contain sufficient metadata that they are + self-describing... including physical units if appropriate, and that each + value can be located in space (relative to earth-based coordinates) and + time.* + + +Whilst the CF conventions are often mentioned alongside NetCDF, Iris implements +several major format importers which can take files of specific formats and +turn them into Iris cubes. Additionally, a framework is provided which allows +users to extend Iris' import capability to cater for specialist or +unimplemented formats. + +A single cube describes one and only one phenomenon, always has a name, a unit +and an n-dimensional data array to represents the cube's phenomenon. In order +to locate the data spatially, temporally, or in any other higher-dimensional +space, a collection of *coordinates* exist on the cube. + + +Coordinates +=========== + +A coordinate is a container to store metadata about some dimension(s) of a +cube's data array and therefore, by definition, its phenomenon. + +* Each coordinate has a name and a unit. +* When a coordinate is added to a cube, the data dimensions that it + represents are also provided. + + * The shape of a coordinate is always the same as the shape of the + associated data dimension(s) on the cube. + * A dimension not explicitly listed signifies that the coordinate is + independent of that dimension. + * Each dimension of a coordinate must be mapped to a data dimension. The + only coordinates with no mapping are scalar coordinates. 
+
+* Depending on the underlying data that the coordinate is representing, its
+  values may be discrete points or be bounded to represent interval extents
+  (e.g. temperature at *point x* **vs** rainfall accumulation *between
+  0000-1200 hours*).
+* Coordinates have an attributes dictionary which can hold arbitrary extra
+  metadata, excluding certain restricted CF names.
+* More complex coordinates may contain a coordinate system which is
+  necessary to fully interpret the values contained within the coordinate.
+
+There are two classes of coordinates:
+
+**DimCoord**
+
+* Numeric
+* Monotonic
+* Representative of, at most, a single data dimension (1d)
+
+**AuxCoord**
+
+* May be of any type, including strings
+* May represent multiple data dimensions (n-dimensional)
+
+
+Cube
+====
+A cube consists of:
+
+* a standard name and/or a long name and an appropriate unit
+* a data array whose values are representative of the phenomenon
+* a collection of coordinates and associated data dimensions on the cube's
+  data array, which are split into two separate lists:
+
+  * *dimension coordinates* - DimCoords which uniquely map to exactly one
+    data dimension, ordered by dimension.
+  * *auxiliary coordinates* - DimCoords or AuxCoords which map to as many
+    data dimensions as the coordinate has dimensions.
+
+* an attributes dictionary which, other than some protected CF names, can
+  hold arbitrary extra metadata. This implements the concept of dataset-level
+  and variable-level attributes when loading and saving NetCDF files (see
+  :class:`~iris.cube.CubeAttrsDict` and NetCDF
+  :func:`~iris.fileformats.netcdf.saver.save` for more).
+* a list of cell methods to represent operations which have already been
+  applied to the data (e.g. "mean over time")
+* a list of coordinate "factories" used for deriving coordinates from the
+  values of other coordinates in the cube
+
+
+Cubes in Practice
+-----------------
+
+A Simple Cube Example
+=====================
+
+Suppose we have some gridded data consisting of 24 air temperature readings
+(in Kelvin), located at 4 different longitudes, 2 different latitudes
+and 3 different heights. Our data array can be represented pictorially:
+
+.. image:: multi_array.svg
+
+Here dimensions 0, 1, and 2 have lengths 3, 2 and 4 respectively.
+
+The Iris cube to represent this data would consist of:
+
+* a standard name of ``air_temperature`` and a unit of ``kelvin``
+* a data array of shape ``(3, 2, 4)``
+* a coordinate, mapping to dimension 0, consisting of:
+
+  * a standard name of ``height`` and unit of ``meters``
+  * an array of length 3 representing the 3 ``height`` points
+
+* a coordinate, mapping to dimension 1, consisting of:
+
+  * a standard name of ``latitude`` and unit of ``degrees``
+  * an array of length 2 representing the 2 latitude points
+  * a coordinate system such that the ``latitude`` points could be fully
+    located on the globe
+
+* a coordinate, mapping to dimension 2, consisting of:
+
+  * a standard name of ``longitude`` and unit of ``degrees``
+  * an array of length 4 representing the 4 longitude points
+  * a coordinate system such that the ``longitude`` points could be fully
+    located on the globe
+
+Pictorially the cube has taken on more information than a simple array:
+
+
+.. image:: multi_array_to_cube.svg
+
+
+Additionally, further information may be optionally attached to the cube.
+For example, it is possible to attach any of the following: + +* a coordinate, not mapping to any data dimensions, consisting of: + + * a standard name of ``time`` and unit of ``days since 2000-01-01 00:00`` + * a data array of length 1 representing the time that the data array is + valid for + +* an auxiliary coordinate, mapping to dimensions 1 and 2, consisting of: + + * a long name of ``place name`` and no unit + * a 2d string array of shape ``(2, 4)`` with the names of the 8 places + that the lat/lons correspond to + +* an auxiliary coordinate "factory", which can derive its own mapping, + consisting of: + + * a standard name of ``height`` and a unit of ``feet`` + * knowledge of how data values for this coordinate can be calculated + given the ``height in meters`` coordinate + +* a cell method of "mean" over "ensemble" to indicate that the data has been + meaned over a collection of "ensembles" (i.e. multiple model runs). + + +Printing a Cube +=============== + +Every Iris cube can be printed to screen as you will see later in the user +guide. It is worth familiarising yourself with the output as this is the +quickest way of inspecting the contents of a cube. Here is the result of +printing a real life cube: + +.. _hybrid_cube_printout: + +.. testcode:: + :hide: + + import iris + filename = iris.sample_data_path('uk_hires.pp') + # NOTE: Every time the output of this cube changes, the full list of deductions below should be re-assessed. + print(iris.load_cube(filename, 'air_potential_temperature')) + +.. testoutput:: + + air_potential_temperature / (K) (time: 3; model_level_number: 7; grid_latitude: 204; grid_longitude: 187) + Dimension coordinates: + time x - - - + model_level_number - x - - + grid_latitude - - x - + grid_longitude - - - x + Auxiliary coordinates: + forecast_period x - - - + level_height - x - - + sigma - x - - + surface_altitude - - x x + Derived coordinates: + altitude - x x x + Scalar coordinates: + forecast_reference_time 2009-11-19 04:00:00 + Attributes: + STASH m01s00i004 + source 'Data from Met Office Unified Model' + um_version '7.3' + + +Using this output we can deduce that: + +* The cube represents air potential temperature. +* There are 4 data dimensions, and the data has a shape of ``(3, 7, 204, 187)`` +* The 4 data dimensions are mapped to the ``time``, ``model_level_number``, + ``grid_latitude``, ``grid_longitude`` coordinates respectively +* There are three 1d auxiliary coordinates and one 2d auxiliary + (``surface_altitude``) +* There is a single ``altitude`` derived coordinate, which spans 3 data + dimensions +* There are 7 distinct values in the "model_level_number" coordinate. Similar + inferences can + be made for the other dimension coordinates. +* There are 7, not necessarily distinct, values in the ``level_height`` + coordinate. +* There is a single ``forecast_reference_time`` scalar coordinate representing + the entire cube. +* The cube has one further attribute relating to the phenomenon. + In this case the originating file format, PP, encodes information in a STASH + code which in some cases can be useful for identifying advanced experiment + information relating to the phenomenon. diff --git a/docs/src/userguide/loading_iris_cubes.rst b/docs/src/userguide/loading_iris_cubes.rst new file mode 100644 index 0000000000..b71f033c30 --- /dev/null +++ b/docs/src/userguide/loading_iris_cubes.rst @@ -0,0 +1,293 @@ +.. 
_loading_iris_cubes: + +=================== +Loading Iris Cubes +=================== + +To load a single file into a **list** of Iris cubes +the :py:func:`iris.load` function is used:: + + import iris + filename = '/path/to/file' + cubes = iris.load(filename) + +Iris will attempt to return **as few cubes as possible** +by collecting together multiple fields with a shared standard name +into a single multidimensional cube. + +The :py:func:`iris.load` function automatically recognises the format +of the given files and attempts to produce Iris Cubes from their contents. + +.. note:: + + Currently there is support for CF NetCDF, GRIB 1 & 2, PP and FieldsFiles + file formats with a framework for this to be extended to custom formats. + + +In order to find out what has been loaded, the result can be printed: + + >>> import iris + >>> filename = iris.sample_data_path('uk_hires.pp') + >>> cubes = iris.load(filename) + >>> print(cubes) + 0: air_potential_temperature / (K) (time: 3; model_level_number: 7; grid_latitude: 204; grid_longitude: 187) + 1: surface_altitude / (m) (grid_latitude: 204; grid_longitude: 187) + + +This shows that there were 2 cubes as a result of loading the file, they were: +``air_potential_temperature`` and ``surface_altitude``. + +The ``surface_altitude`` cube was 2 dimensional with: + +* the two dimensions have extents of 204 and 187 respectively and are + represented by the ``grid_latitude`` and ``grid_longitude`` coordinates. + +The ``air_potential_temperature`` cubes were 4 dimensional with: + +* the same length ``grid_latitude`` and ``grid_longitude`` dimensions as + ``surface_altitide`` +* a ``time`` dimension of length 3 +* a ``model_level_number`` dimension of length 7 + +.. note:: + + The result of :func:`iris.load` is **always** a :class:`iris.cube.CubeList` + (even if it only contains one :class:`iris.cube.Cube` - see + :ref:`strict-loading`). Anything that can be done with a Python + :class:`list` can be done with an :class:`iris.cube.CubeList`. + + The order of this list should not be relied upon. Ways of loading a + specific cube or cubes are covered in :ref:`constrained-loading` and + :ref:`strict-loading`. + +.. hint:: + + Throughout this user guide you will see the function + ``iris.sample_data_path`` being used to get the filename for the resources + used in the examples. The result of this function is just a string. 
+ + Using this function allows us to provide examples which will work + across platforms and with data installed in different locations, + however in practice you will want to use your own strings:: + + filename = '/path/to/file' + cubes = iris.load(filename) + +To get the air potential temperature cube from the list of cubes +returned by :py:func:`iris.load` in the previous example, +list indexing can be used: + + >>> import iris + >>> filename = iris.sample_data_path('uk_hires.pp') + >>> cubes = iris.load(filename) + >>> # get the first cube (list indexing is 0 based) + >>> air_potential_temperature = cubes[0] + >>> print(air_potential_temperature) + air_potential_temperature / (K) (time: 3; model_level_number: 7; grid_latitude: 204; grid_longitude: 187) + Dimension coordinates: + time x - - - + model_level_number - x - - + grid_latitude - - x - + grid_longitude - - - x + Auxiliary coordinates: + forecast_period x - - - + level_height - x - - + sigma - x - - + surface_altitude - - x x + Derived coordinates: + altitude - x x x + Scalar coordinates: + forecast_reference_time 2009-11-19 04:00:00 + Attributes: + STASH m01s00i004 + source 'Data from Met Office Unified Model' + um_version '7.3' + +Notice that the result of printing a **cube** is a little more verbose than +it was when printing a **list of cubes**. In addition to the very short summary +which is provided when printing a list of cubes, information is provided +on the coordinates which constitute the cube in question. +This was the output discussed at the end of the :doc:`iris_cubes` section. + +.. note:: + + Dimensioned coordinates will have a dimension marker ``x`` in the + appropriate column for each cube data dimension that they describe. + + +Loading Multiple Files +----------------------- + +To load more than one file into a list of cubes, a list of filenames can be +provided to :py:func:`iris.load`:: + + filenames = [iris.sample_data_path('uk_hires.pp'), + iris.sample_data_path('air_temp.pp')] + cubes = iris.load(filenames) + + +It is also possible to load one or more files with wildcard substitution +using the expansion rules defined :py:mod:`fnmatch`. + +For example, to match **zero or more characters** in the filename, +star wildcards can be used:: + + filename = iris.sample_data_path('GloSea4', '*.pp') + cubes = iris.load(filename) + + +.. note:: + + The cubes returned will not necessarily be in the same order as the + order of the filenames. + +Lazy Loading +------------ + +In fact when Iris loads data from most file types, it normally only reads the +essential descriptive information or metadata : the bulk of the actual data +content will only be loaded later, as it is needed. +This is referred to as 'lazy' data. It allows loading to be much quicker, and to occupy less memory. + +For more on the benefits, handling and uses of lazy data, see :doc:`Real and Lazy Data `. + + +.. _constrained-loading: + +Constrained Loading +----------------------- +Given a large dataset, it is possible to restrict or constrain the load +to match specific Iris cube metadata. +Constrained loading provides the ability to generate a cube +from a specific subset of data that is of particular interest. 
+
+As we have seen, loading the following file creates several Cubes::
+
+    filename = iris.sample_data_path('uk_hires.pp')
+    cubes = iris.load(filename)
+
+Specifying a name as a constraint argument to :py:func:`iris.load` will mean
+only cubes with a matching :meth:`name <iris.cube.Cube.name>`
+will be returned::
+
+    filename = iris.sample_data_path('uk_hires.pp')
+    cubes = iris.load(filename, 'surface_altitude')
+
+Note that the provided name will match against either the standard name,
+long name, NetCDF variable name or STASH metadata of a cube. Therefore, the
+previous example using the ``surface_altitude`` standard name constraint can
+also be achieved using the STASH value of ``m01s00i033``::
+
+    filename = iris.sample_data_path('uk_hires.pp')
+    cubes = iris.load(filename, 'm01s00i033')
+
+If more specific name constraint control is required, i.e. to constrain
+against a combination of standard name, long name, NetCDF variable name and/or
+STASH metadata, consider using the :class:`iris.NameConstraint`. For example,
+to constrain against both a standard name of ``surface_altitude`` **and** a STASH
+of ``m01s00i033``::
+
+    filename = iris.sample_data_path('uk_hires.pp')
+    constraint = iris.NameConstraint(standard_name='surface_altitude', STASH='m01s00i033')
+    cubes = iris.load(filename, constraint)
+
+To constrain the load to multiple distinct constraints, a list of constraints
+can be provided. This is equivalent to running load once for each constraint
+but is likely to be more efficient::
+
+    filename = iris.sample_data_path('uk_hires.pp')
+    cubes = iris.load(filename, ['air_potential_temperature', 'surface_altitude'])
+
+The :class:`iris.Constraint` class can be used to restrict coordinate values
+on load. For example, to constrain the load to match
+a specific ``model_level_number``::
+
+    filename = iris.sample_data_path('uk_hires.pp')
+    level_10 = iris.Constraint(model_level_number=10)
+    cubes = iris.load(filename, level_10)
+
+Further details on using :class:`iris.Constraint` are
+discussed later in :ref:`cube_extraction`.
+
+.. _strict-loading:
+
+Strict Loading
+--------------
+
+The :py:func:`iris.load_cube` and :py:func:`iris.load_cubes` functions are
+similar to :py:func:`iris.load` except they can only return
+*one cube per constraint*.
+The :func:`iris.load_cube` function accepts a single constraint and
+returns a single cube. The :func:`iris.load_cubes` function accepts any
+number of constraints and returns a list of cubes (as an :class:`iris.cube.CubeList`).
+Providing no constraints to :func:`iris.load_cube` or :func:`iris.load_cubes`
+is equivalent to requesting exactly one cube of any type.
+
+A single cube is loaded in the following example::
+
+    >>> filename = iris.sample_data_path('air_temp.pp')
+    >>> cube = iris.load_cube(filename)
+    >>> print(cube)
+    air_temperature / (K)               (latitude: 73; longitude: 96)
+        Dimension coordinates:
+            latitude                    x              -
+            longitude                   -              x
+        ...
+        Cell methods:
+            0                           time: mean
+
+However, when attempting to load data which would result in anything other than
+one cube, an exception is raised::
+
+    >>> filename = iris.sample_data_path('uk_hires.pp')
+    >>> cube = iris.load_cube(filename)
+    Traceback (most recent call last):
+    ...
+    iris.exceptions.ConstraintMismatchError: Expected exactly one cube, found 2.
+
+.. note::
+
+    All the load functions share many of the same features, hence
+    multiple files could be loaded with wildcard filenames
+    or by providing a list of filenames.
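
For instance, a minimal sketch combining strict loading with wildcard
filenames might look like this (the phenomenon name here is illustrative
rather than taken from the sample data)::

    import iris

    # Load exactly one cube spanning every matching file; if the matched
    # files do not combine into a single cube, an exception is raised.
    filename = iris.sample_data_path('GloSea4', '*.pp')
    cube = iris.load_cube(filename, 'surface_temperature')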
+
+The strict nature of :func:`iris.load_cube` and :func:`iris.load_cubes`
+means that, when combined with constrained loading, it is possible to
+ensure that precisely what was asked for on load is given
+- otherwise an exception is raised.
+This fact can be utilised to make code only run successfully if
+the data provided meets the expected criteria.
+
+For example, suppose that code needed ``air_potential_temperature``
+in order to run::
+
+    import iris
+    filename = iris.sample_data_path('uk_hires.pp')
+    air_pot_temp = iris.load_cube(filename, 'air_potential_temperature')
+    print(air_pot_temp)
+
+Should the file not produce exactly one cube with a standard name of
+'air_potential_temperature', an exception will be raised.
+
+Similarly, supposing a routine needed both 'surface_altitude' and
+'air_potential_temperature' to be able to run::
+
+    import iris
+    filename = iris.sample_data_path('uk_hires.pp')
+    altitude_cube, pot_temp_cube = iris.load_cubes(filename, ['surface_altitude', 'air_potential_temperature'])
+
+The result of :func:`iris.load_cubes` in this case will be a list of 2 cubes
+ordered by the constraints provided. Multiple assignment has been used to put
+these two cubes into separate variables.
+
+.. note::
+
+    In Python, lists of a pre-known length and order can be exploited
+    using *multiple assignment*:
+
+        >>> number_one, number_two = [1, 2]
+        >>> print(number_one)
+        1
+        >>> print(number_two)
+        2
+
diff --git a/docs/src/userguide/merge.svg b/docs/src/userguide/merge.svg
new file mode 100644
index 0000000000..0f0d37a1ca
--- /dev/null
+++ b/docs/src/userguide/merge.svg
@@ -0,0 +1,753 @@
[753 lines of SVG markup omitted: a pictograph of the merge process, showing
cubes indexed 0-3 along ``x``, ``y`` and ``z`` axes.]
diff --git a/docs/iris/src/userguide/merge_and_concat.rst b/docs/src/userguide/merge_and_concat.rst
similarity index 92%
rename from docs/iris/src/userguide/merge_and_concat.rst
rename to docs/src/userguide/merge_and_concat.rst
index 8321f4106e..d754e08cc1 100644
--- a/docs/iris/src/userguide/merge_and_concat.rst
+++ b/docs/src/userguide/merge_and_concat.rst
@@ -16,20 +16,20 @@ issues from occurring.
 Both ``merge`` and ``concatenate`` take multiple cubes as input and result in
 fewer cubes as output. The following diagram illustrates the two processes:
 
-.. image:: merge_and_concat.png
+.. image:: merge_and_concat.svg
    :alt: Pictographic of merge and concatenation.
    :align: center
 
 There is one major difference between the ``merge`` and ``concatenate`` processes.
 
-  * The ``merge`` process combines multiple input cubes into a
-    single resultant cube with new dimensions created from the
-    *scalar coordinate values* of the input cubes.
+* The ``merge`` process combines multiple input cubes into a
+  single resultant cube with new dimensions created from the
+  *scalar coordinate values* of the input cubes.
 
-  * The ``concatenate`` process combines multiple input cubes into a
-    single resultant cube with the same *number of dimensions* as the input cubes,
-    but with the length of one or more dimensions extended by *joining together
-    sequential dimension coordinates*.
+* The ``concatenate`` process combines multiple input cubes into a
+  single resultant cube with the same *number of dimensions* as the input cubes,
+  but with the length of one or more dimensions extended by *joining together
+  sequential dimension coordinates*.
 
 Let's imagine 28 individual cubes representing the temperature at a location
 ``(y, x)``; one cube for each day of February. We can use
@@ -108,25 +108,27 @@ make a new ``z`` dimension coordinate:
     >>> print(cubes[0])
     air_temperature / (kelvin)          (y: 4; x: 5)
         ...
-        Scalar coordinates:
-             z: 1 meters
+        Scalar coordinates:
+            z                           1 meters
     >>> print(cubes[1])
     air_temperature / (kelvin)          (y: 4; x: 5)
         ...
-        Scalar coordinates:
-             z: 2 meters
+        Scalar coordinates:
+            z                           2 meters
     >>> print(cubes[2])
     air_temperature / (kelvin)          (y: 4; x: 5)
-        ...
-        Scalar coordinates:
-             z: 3 meters
+        Dimension coordinates:
+            y                           x  -
+            x                           -  x
+        Scalar coordinates:
+            z                           3 meters
     >>> print(cubes.merge())
     0: air_temperature / (kelvin)       (z: 3; y: 4; x: 5)
 
 The following diagram illustrates what has taken place in this example:
 
-.. image:: merge.png
+.. image:: merge.svg
    :alt: Pictographic of merge.
    :align: center
@@ -203,7 +205,7 @@ single cube. An example of fixing an issue like this can be found in the
 :ref:`merge_concat_common_issues` section.
 
-Merge in Iris load
+Merge in Iris Load
 ==================
 
 The CubeList's :meth:`~iris.cube.CubeList.merge` method is used internally
@@ -251,6 +253,11 @@ which are described below.
 Using CubeList.concatenate
 ==========================
 
+.. seealso::
+
+    Relevant gallery example:
+    :ref:`sphx_glr_generated_gallery_general_plot_projections_and_annotations.py` (Brief concatenating examples)
+
 The :meth:`CubeList.concatenate <iris.cube.CubeList.concatenate>` method operates on
 a list of cubes and returns a new :class:`~iris.cube.CubeList` containing
 the cubes that have been concatenated.
@@ -287,7 +294,7 @@ cubes to form a new cube with an extended ``t`` coordinate:
 
 The following diagram illustrates what has taken place in this example:
 
-.. image:: concat.png
+.. image:: concat.svg
    :alt: Pictographic of concatenate.
    :align: center
@@ -365,7 +372,7 @@ single cube. An example of fixing an issue like this can be found in the
 
 .. _merge_concat_common_issues:
 
-Common issues with merge and concatenate
+Common Issues With Merge and Concatenate
 ----------------------------------------
 
 The Iris algorithms that drive :meth:`~iris.cube.CubeList.merge` and
@@ -378,7 +385,7 @@ If this consistency is not maintained then the
 seemingly arbitrary manner.
 
 The methods :meth:`~iris.cube.CubeList.merge_cube` and
-:meth:`~iris.cube.CubeList.concatenate_cube`
+:meth:`~iris.cube.CubeList.concatenate_cube`
 were introduced to Iris to help you locate
 differences in input cubes that prevent the input cubes merging or
 concatenating. Nevertheless, certain difficulties with using
@@ -398,7 +405,7 @@ Merge
 
 Differences in the :data:`~iris.cube.Cube.attributes` of the input cubes
 probably cause the greatest amount of merge-related difficulties.
 In recognition of this, Iris has a helper function,
-:func:`~iris.experimental.equalise_cubes.equalise_attributes`, to equalise
+:func:`~iris.util.equalise_attributes`, to equalise
 attribute differences in the input cubes.
 
 .. note::
 
     :meth:`iris.cube.Cube.is_compatible` are **not** designed to give
     the user an indication of whether two cubes can be merged.
-To demonstrate using :func:`~iris.experimental.equalise_cubes.equalise_attributes`, +To demonstrate using :func:`~iris.util.equalise_attributes`, let's return to our non-merging list of input cubes from the merge_cube example from earlier. -We'll call :func:`~iris.experimental.equalise_cubes.equalise_attributes` on the +We'll call :func:`~iris.util.equalise_attributes` on the input cubes before merging the input cubes using :meth:`~iris.cube.CubeList.merge_cube`: .. doctest:: merge_vs_merge_cube :options: +ELLIPSIS, +NORMALIZE_WHITESPACE - >>> from iris.experimental.equalise_cubes import equalise_attributes + >>> from iris.util import equalise_attributes >>> print(cubes) 0: air_temperature / (kelvin) (y: 4; x: 5) 1: air_temperature / (kelvin) (y: 4; x: 5) @@ -436,7 +443,7 @@ input cubes before merging the input cubes using :meth:`~iris.cube.CubeList.merg iris.exceptions.MergeError: failed to merge into a single cube. cube.attributes keys differ: 'Conventions' - >>> equalise_attributes(cubes) + >>> removed_attributes = equalise_attributes(cubes) >>> print(cubes[0].attributes) {} @@ -473,7 +480,7 @@ This is shown in the example below:: **Merging Duplicate Cubes** -The Iris load process does not merge duplicate cubes (two or more identical cubes in +The Iris merge process does not merge duplicate cubes (two or more identical cubes in the input cubes) by default. This behaviour can be changed by setting the ``unique`` keyword argument to :meth:`~iris.cube.CubeList.merge` to ``False``. @@ -529,7 +536,7 @@ Trying to merge the input cubes with duplicate cubes not allowed raises an error highlighting the presence of the duplicate cube. -**Single value coordinates** +**Single Value Coordinates** Coordinates containing only a single value can cause confusion when combining input cubes. 
Remember:
@@ -553,18 +560,18 @@ combine your cubes::
 
     >>> print(cubes[0])
     air_temperature / (kelvin)          (y: 4; x: 5)
-        Dimension coordinates:
-             x                           x  -
-             y                           -  x
-        Scalar coordinates:
-             z: 1
+        Dimension coordinates:
+            y                           x  -
+            x                           -  x
+        Scalar coordinates:
+            z                           1 meters
     >>> print(cubes[1])
     air_temperature / (kelvin)          (y: 4; x: 5)
-        Dimension coordinates:
-             x                           x  -
-             y                           -  x
-        Scalar coordinates:
-             z: 2
+        Dimension coordinates:
+            y                           x  -
+            x                           -  x
+        Scalar coordinates:
+            z                           2 meters
 
 If your cubes are similar to those below (the single value ``z`` coordinate is
diff --git a/docs/iris/src/userguide/merge_and_concat.svg b/docs/src/userguide/merge_and_concat.svg
similarity index 100%
rename from docs/iris/src/userguide/merge_and_concat.svg
rename to docs/src/userguide/merge_and_concat.svg
diff --git a/docs/src/userguide/multi_array.svg b/docs/src/userguide/multi_array.svg
new file mode 100644
index 0000000000..38ba58744f
--- /dev/null
+++ b/docs/src/userguide/multi_array.svg
@@ -0,0 +1,487 @@
[487 lines of SVG markup omitted: a pictograph of three separate 2D data
arrays, indexed 0-2.]
diff --git a/docs/src/userguide/multi_array_to_cube.svg b/docs/src/userguide/multi_array_to_cube.svg
new file mode 100644
index 0000000000..8b0cc529dd
--- /dev/null
+++ b/docs/src/userguide/multi_array_to_cube.svg
@@ -0,0 +1,1436 @@
[1436 lines of SVG markup omitted: a pictograph of multiple data arrays
combining into a single cube, with Longitude (degrees), Latitude (degrees)
and Height (meters) axes and Air temperature (kelvin) values.]
diff --git a/docs/iris/src/userguide/navigating_a_cube.rst b/docs/src/userguide/navigating_a_cube.rst
similarity index 80%
rename from docs/iris/src/userguide/navigating_a_cube.rst
rename to docs/src/userguide/navigating_a_cube.rst
index 055617e047..ec3cd8e0dc 100644
--- a/docs/iris/src/userguide/navigating_a_cube.rst
+++ b/docs/src/userguide/navigating_a_cube.rst
@@ -1,5 +1,5 @@
 =================
-Navigating a cube
+Navigating a Cube
 =================
 
 .. testsetup::
@@ -15,7 +15,7 @@ Navigating a cube
 After loading any cube, you will want to investigate precisely what it contains. This section is all
 about accessing and manipulating the metadata contained within a cube.
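
As a quick first check, a cube can also be summarised on a single line. A
minimal sketch (:meth:`~iris.cube.Cube.summary` with ``shorten=True`` gives
the same one-line form seen when printing a list of cubes)::

    import iris

    cube = iris.load_cube(iris.sample_data_path('air_temp.pp'))
    # One-line overview of the cube's name, units and dimensions.
    print(cube.summary(shorten=True))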
-Cube string representations +Cube String Representations --------------------------- We have already seen a basic string representation of a cube when printing: @@ -25,17 +25,17 @@ We have already seen a basic string representation of a cube when printing: >>> cube = iris.load_cube(filename) >>> print(cube) air_pressure_at_sea_level / (Pa) (grid_latitude: 22; grid_longitude: 36) - Dimension coordinates: - grid_latitude x - - grid_longitude - x - Scalar coordinates: - forecast_period: 0.0 hours - forecast_reference_time: 2006-06-15 00:00:00 - time: 2006-06-15 00:00:00 - Attributes: - Conventions: CF-1.5 - STASH: m01s16i222 - source: Data from Met Office Unified Model 6.01 + Dimension coordinates: + grid_latitude x - + grid_longitude - x + Scalar coordinates: + forecast_period 0.0 hours + forecast_reference_time 2006-06-15 00:00:00 + time 2006-06-15 00:00:00 + Attributes: + Conventions 'CF-1.5' + STASH m01s16i222 + source 'Data from Met Office Unified Model 6.01' This representation is equivalent to passing the cube to the :func:`str` function. This function can be used on @@ -52,7 +52,7 @@ variable. In most cases it is reasonable to ignore anything starting with a "``_ dir(cube) help(cube) -Working with cubes +Working With Cubes ------------------ Every cube has a standard name, long name and units which are accessed with @@ -65,7 +65,8 @@ and :attr:`Cube.units ` respectively:: print(cube.units) Interrogating these with the standard :func:`type` function will tell you that ``standard_name`` and ``long_name`` -are either a string or ``None``, and ``units`` is an instance of :class:`iris.unit.Unit`. +are either a string or ``None``, and ``units`` is an instance of :class:`iris.unit.Unit`. A more in depth discussion on +the cube units and their functional effects can be found at the end of :doc:`cube_maths`. You can access a string representing the "name" of a cube with the :meth:`Cube.name() ` method:: @@ -109,8 +110,12 @@ cube with the :attr:`Cube.cell_methods ` attribute: print(cube.cell_methods) +.. seealso:: -Accessing coordinates on the cube + Relevant gallery example: + :ref:`sphx_glr_generated_gallery_meteorology_plot_wind_barbs.py` + +Accessing Coordinates on the Cube --------------------------------- A cube's coordinates can be retrieved via :meth:`Cube.coords `. @@ -147,7 +152,7 @@ numpy array. 
If the coordinate has no bounds ``None`` will be returned:: print(type(coord.bounds)) -Adding metadata to a cube +Adding Metadata to a Cube ------------------------- We can add and remove coordinates via :func:`Cube.add_dim_coord`, @@ -159,33 +164,37 @@ We can add and remove coordinates via :func:`Cube.add_dim_coord>> cube.add_aux_coord(new_coord) >>> print(cube) air_pressure_at_sea_level / (Pa) (grid_latitude: 22; grid_longitude: 36) - Dimension coordinates: - grid_latitude x - - grid_longitude - x - Scalar coordinates: - forecast_period: 0.0 hours - forecast_reference_time: 2006-06-15 00:00:00 - my_custom_coordinate: 1 - time: 2006-06-15 00:00:00 - Attributes: - Conventions: CF-1.5 - STASH: m01s16i222 - source: Data from Met Office Unified Model 6.01 + Dimension coordinates: + grid_latitude x - + grid_longitude - x + Scalar coordinates: + forecast_period 0.0 hours + forecast_reference_time 2006-06-15 00:00:00 + my_custom_coordinate 1 + time 2006-06-15 00:00:00 + Attributes: + Conventions 'CF-1.5' + STASH m01s16i222 + source 'Data from Met Office Unified Model 6.01' The coordinate ``my_custom_coordinate`` now exists on the cube and is listed under the non-dimensioned single valued scalar coordinates. +.. seealso:: + + Relevant gallery example: + :ref:`sphx_glr_generated_gallery_general_plot_custom_file_loading.py` (Adding Metadata) -Adding and removing metadata to the cube at load time +Adding and Removing Metadata to the Cube at Load Time ----------------------------------------------------- Sometimes when loading a cube problems occur when the amount of metadata is more or less than expected. This is often caused by one of the following: - * The file does not contain enough metadata, and therefore the cube cannot know everything about the file. - * Some of the metadata of the file is contained in the filename, but is not part of the actual file. - * There is not enough metadata loaded from the original file as Iris has not handled the format fully. *(in which case, - please let us know about it)* +* The file does not contain enough metadata, and therefore the cube cannot know everything about the file. +* Some of the metadata of the file is contained in the filename, but is not part of the actual file. +* There is not enough metadata loaded from the original file as Iris has not handled the format fully. *(in which case, + please let us know about it)* To solve this, all of :func:`iris.load`, :func:`iris.load_cube`, and :func:`iris.load_cubes` support a callback keyword. @@ -229,7 +238,7 @@ by field basis *before* they are automatically merged together: # Add our own realization coordinate if it doesn't already exist. if not cube.coords('realization'): realization = np.int32(filename[-6:-3]) - ensemble_coord = icoords.AuxCoord(realization, standard_name='realization') + ensemble_coord = icoords.AuxCoord(realization, standard_name='realization', units="1") cube.add_aux_coord(ensemble_coord) filename = iris.sample_data_path('GloSea4', '*.pp') diff --git a/docs/iris/src/userguide/plotting_a_cube.rst b/docs/src/userguide/plotting_a_cube.rst similarity index 81% rename from docs/iris/src/userguide/plotting_a_cube.rst rename to docs/src/userguide/plotting_a_cube.rst index d82cbbb027..f152690835 100644 --- a/docs/iris/src/userguide/plotting_a_cube.rst +++ b/docs/src/userguide/plotting_a_cube.rst @@ -1,19 +1,22 @@ .. 
_plotting_a_cube: -================================== -Plotting a cube -================================== +=============== +Plotting a Cube +=============== Iris utilises the power of Python's -`Matplotlib `_ package in order to generate +`Matplotlib `_ package in order to generate high quality, production ready 1D and 2D plots. -The functionality of the Matplotlib -`pyplot `_ module has +The functionality of the Matplotlib :py:mod:`~matplotlib.pyplot` module has been extended within Iris to facilitate easy visualisation of a cube's data. +.. seealso:: + + Relevant gallery example: + :ref:`sphx_glr_generated_gallery_general_plot_zonal_means.py` (Plotting with multiple axes) *************************** -Matplotlib's pyplot basics +Matplotlib's Pyplot Basics *************************** A simple line plot can be created using the @@ -35,7 +38,7 @@ There are two modes of rendering within Matplotlib; **interactive** and **non-interactive**. -Interactive plot rendering +Interactive Plot Rendering ========================== The previous example was *non-interactive* as the figure is only rendered *after* the call to :py:func:`plt.show() `. @@ -83,9 +86,9 @@ so ensure that interactive mode is turned off with:: plt.interactive(False) - -Saving a plot -============= +=============== + Saving a Plot +=============== The :py:func:`matplotlib.pyplot.savefig` function is similar to **plt.show()** in that they are both *non-interactive* visualisation modes. @@ -102,18 +105,18 @@ see :py:func:`matplotlib.pyplot.savefig`). Some of the formats which are supported by **plt.savefig**: - ====== ====== ====================================================================== - Format Type Description - ====== ====== ====================================================================== - EPS Vector Encapsulated PostScript - PDF Vector Portable Document Format - PNG Raster Portable Network Graphics, a format with a lossless compression method - PS Vector PostScript, ideal for printer output - SVG Vector Scalable Vector Graphics, XML based - ====== ====== ====================================================================== +====== ====== ====================================================================== +Format Type Description +====== ====== ====================================================================== +EPS Vector Encapsulated PostScript +PDF Vector Portable Document Format +PNG Raster Portable Network Graphics, a format with a lossless compression method +PS Vector PostScript, ideal for printer output +SVG Vector Scalable Vector Graphics, XML based +====== ====== ====================================================================== ****************** -Iris cube plotting +Iris Cube Plotting ****************** The Iris modules :py:mod:`iris.quickplot` and :py:mod:`iris.plot` extend the @@ -126,12 +129,12 @@ wrapper functions. As a rule of thumb: - * if you wish to do a visualisation with a cube, use ``iris.plot`` or - ``iris.quickplot``. - * if you wish to show, save or manipulate **any** visualisation, - including ones created with Iris, use ``matplotlib.pyplot``. - * if you wish to create a non cube visualisation, also use - ``matplotlib.pyplot``. +* if you wish to do a visualisation with a cube, use ``iris.plot`` or + ``iris.quickplot``. +* if you wish to show, save or manipulate **any** visualisation, + including ones created with Iris, use ``matplotlib.pyplot``. +* if you wish to create a non cube visualisation, also use + ``matplotlib.pyplot``. 
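
As a minimal sketch of that division of labour (using the same sample data as
elsewhere in this chapter)::

    import matplotlib.pyplot as plt

    import iris
    import iris.quickplot as qplt

    cube = iris.load_cube(iris.sample_data_path('air_temp.pp'))

    # Cube visualisation is handled by the Iris wrapper...
    qplt.contourf(cube)

    # ...while showing or saving the resulting figure is plain matplotlib.
    plt.savefig('air_temp.png')
    plt.show()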
The ``iris.quickplot`` module is exactly the same as the ``iris.plot`` module, except that ``quickplot`` will add a title, x and y labels and a colorbar @@ -149,7 +152,7 @@ where appropriate. import iris.quickplot as qplt -Plotting 1-dimensional cubes +Plotting 1-Dimensional Cubes ============================ The simplest 1D plot is achieved with the :py:func:`iris.plot.plot` function. @@ -181,7 +184,7 @@ For example, the previous plot can be improved quickly by replacing -Multi-line plot +Multi-Line Plot --------------- A multi-lined (or over-plotted) plot, with a legend, can be achieved easily by @@ -190,7 +193,7 @@ and providing the label keyword to identify it. Once all of the lines have been added the :func:`matplotlib.pyplot.legend` function can be called to indicate that a legend is desired: -.. plot:: ../example_code/General/lineplot_with_legend.py +.. plot:: ../gallery_code/general/plot_lineplot_with_legend.py :include-source: This example of consecutive ``qplt.plot`` calls coupled with the @@ -206,19 +209,19 @@ the temperature at some latitude cross-sections. that any useful functions or variables defined within the script can be imported into other scripts without running all of the code and thus creating an unwanted plot. This is discussed in more detail at - ``_. + ``_. In order to run this example, you will need to copy the code into a file - and run it using ``python2.7 my_file.py``. + and run it using ``python my_file.py``. -Plotting 2-dimensional cubes +Plotting 2-Dimensional Cubes ============================ -Creating maps +Creating Maps ------------- Whenever a 2D plot is created using an :class:`iris.coord_systems.CoordSystem`, -a cartopy :class:`~cartopy.mpl.GeoAxes` instance is created, which can be +a cartopy :class:`~cartopy.mpl.geoaxes.GeoAxes` instance is created, which can be accessed with the :func:`matplotlib.pyplot.gca` function. Given the current map, you can draw gridlines and coastlines amongst other @@ -226,11 +229,11 @@ things. .. seealso:: - :meth:`cartopy's gridlines() `, - :meth:`cartopy's coastlines() `. + :meth:`cartopy's gridlines() `, + :meth:`cartopy's coastlines() `. -Cube contour +Cube Contour ------------ A simple contour plot of a cube can be created with either the :func:`iris.plot.contour` or :func:`iris.quickplot.contour` functions: @@ -239,7 +242,7 @@ A simple contour plot of a cube can be created with either the :include-source: -Cube filled contour +Cube Filled Contour ------------------- Similarly a filled contour plot of a cube can be created with the :func:`iris.plot.contourf` or :func:`iris.quickplot.contourf` functions: @@ -248,7 +251,7 @@ Similarly a filled contour plot of a cube can be created with the :include-source: -Cube block plot +Cube Block Plot --------------- In some situations the underlying coordinates are better represented with a continuous bounded coordinate, in which case a "block" plot may be more @@ -268,11 +271,11 @@ or :func:`iris.quickplot.pcolormesh`. .. _brewer-info: *********************** -Brewer colour palettes +Brewer Colour Palettes *********************** Iris includes colour specifications and designs developed by -`Cynthia Brewer `_. 
+`Cynthia Brewer `_ These colour schemes are freely available under the following licence:: Apache-Style Software License for ColorBrewer software and ColorBrewer Color Schemes @@ -282,7 +285,7 @@ These colour schemes are freely available under the following licence:: Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR @@ -290,7 +293,7 @@ These colour schemes are freely available under the following licence:: specific language governing permissions and limitations under the License. To include a reference in a journal article or report please refer to -`section 5 `_ +`section 5 `_ in the citation guidance provided by Cynthia Brewer. For adding citations to Iris plots, see :ref:`brewer-cite` (below). @@ -298,12 +301,12 @@ For adding citations to Iris plots, see :ref:`brewer-cite` (below). Available Brewer Schemes ======================== The following subset of Brewer palettes found at -`colorbrewer.org `_ are available within Iris. +`colorbrewer2.org `_ are available within Iris. .. plot:: userguide/plotting_examples/brewer.py -Plotting with Brewer +Plotting With Brewer ==================== To plot a cube using a Brewer colour palette, simply select one of the Iris @@ -316,7 +319,7 @@ become available once :mod:`iris.plot` or :mod:`iris.quickplot` are imported. .. _brewer-cite: -Adding a citation +Adding a Citation ================= Citations can be easily added to a plot using the diff --git a/docs/src/userguide/plotting_examples/1d_quickplot_simple.py b/docs/src/userguide/plotting_examples/1d_quickplot_simple.py new file mode 100644 index 0000000000..58d0918dcb --- /dev/null +++ b/docs/src/userguide/plotting_examples/1d_quickplot_simple.py @@ -0,0 +1,16 @@ +"""Simple 1D plot using iris.quickplot.plot().""" + +import matplotlib.pyplot as plt + +import iris +import iris.quickplot as qplt + +fname = iris.sample_data_path("air_temp.pp") +temperature = iris.load_cube(fname) + +# Take a 1d slice using array style indexing. +temperature_1d = temperature[5, :] + +qplt.plot(temperature_1d) + +plt.show() diff --git a/docs/src/userguide/plotting_examples/1d_simple.py b/docs/src/userguide/plotting_examples/1d_simple.py new file mode 100644 index 0000000000..4511a0fbe1 --- /dev/null +++ b/docs/src/userguide/plotting_examples/1d_simple.py @@ -0,0 +1,16 @@ +"""Simple 1D plot using iris.plot.plot().""" + +import matplotlib.pyplot as plt + +import iris +import iris.plot as iplt + +fname = iris.sample_data_path("air_temp.pp") +temperature = iris.load_cube(fname) + +# Take a 1d slice using array style indexing. 
+temperature_1d = temperature[5, :]
+
+iplt.plot(temperature_1d)
+
+plt.show()
diff --git a/docs/src/userguide/plotting_examples/1d_with_legend.py b/docs/src/userguide/plotting_examples/1d_with_legend.py
new file mode 100644
index 0000000000..b325657766
--- /dev/null
+++ b/docs/src/userguide/plotting_examples/1d_with_legend.py
@@ -0,0 +1,43 @@
+"""Simple 1D plot using iris.plot.plot() with a legend."""
+
+import matplotlib.pyplot as plt
+
+import iris
+import iris.plot as iplt
+
+fname = iris.sample_data_path("air_temp.pp")
+
+# Load exactly one cube from the given file
+temperature = iris.load_cube(fname)
+
+# We are only interested in a small number of latitudes (the 4 after and
+# including the 5th element), so index them out
+temperature = temperature[5:9, :]
+
+for cube in temperature.slices("longitude"):
+    # Create a string label to identify this cube (i.e. latitude: value)
+    cube_label = "latitude: %s" % cube.coord("latitude").points[0]
+
+    # Plot the cube, and associate it with a label
+    iplt.plot(cube, label=cube_label)
+
+# Match the longitude range to global
+max_lon = temperature.coord("longitude").points.max()
+min_lon = temperature.coord("longitude").points.min()
+plt.xlim(min_lon, max_lon)
+
+# Add the legend with 2 columns
+plt.legend(ncol=2)
+
+# Put a grid on the plot
+plt.grid(True)
+
+# Provide some axis labels
+plt.ylabel("Temperature / kelvin")
+plt.xlabel("Longitude / degrees")
+
+# And a sensible title
+plt.suptitle("Air Temperature", fontsize=20, y=0.9)
+
+# Finally, show it.
+plt.show()
diff --git a/docs/src/userguide/plotting_examples/brewer.py b/docs/src/userguide/plotting_examples/brewer.py
new file mode 100644
index 0000000000..e42ad57cc0
--- /dev/null
+++ b/docs/src/userguide/plotting_examples/brewer.py
@@ -0,0 +1,29 @@
+"""Plot a chart of all Brewer colour schemes."""
+
+import matplotlib.pyplot as plt
+import numpy as np
+
+import iris.palette
+
+
+def main():
+    a = np.linspace(0, 1, 256).reshape(1, -1)
+    a = np.vstack((a, a))
+
+    maps = sorted(iris.palette.CMAP_BREWER)
+    nmaps = len(maps)
+
+    fig = plt.figure(figsize=(7, 10))
+    fig.subplots_adjust(top=0.99, bottom=0.01, left=0.2, right=0.99)
+    for i, m in enumerate(maps):
+        ax = plt.subplot(nmaps, 1, i + 1)
+        plt.axis("off")
+        plt.imshow(a, aspect="auto", cmap=plt.get_cmap(m), origin="lower")
+        pos = list(ax.get_position().bounds)
+        fig.text(pos[0] - 0.01, pos[1], m, fontsize=8, horizontalalignment="right")
+
+    plt.show()
+
+
+if __name__ == "__main__":
+    main()
diff --git a/docs/src/userguide/plotting_examples/cube_blockplot.py b/docs/src/userguide/plotting_examples/cube_blockplot.py
new file mode 100644
index 0000000000..1f4d3985a3
--- /dev/null
+++ b/docs/src/userguide/plotting_examples/cube_blockplot.py
@@ -0,0 +1,15 @@
+"""Cube block plot using iris.quickplot.pcolormesh()."""
+
+import matplotlib.pyplot as plt
+
+import iris
+import iris.quickplot as qplt
+
+# Load the data for a single value of model level number.
+fname = iris.sample_data_path("hybrid_height.nc")
+temperature_cube = iris.load_cube(fname, iris.Constraint(model_level_number=1))
+
+# Draw the block plot.
+qplt.pcolormesh(temperature_cube)
+
+plt.show()
diff --git a/docs/src/userguide/plotting_examples/cube_brewer_cite_contourf.py b/docs/src/userguide/plotting_examples/cube_brewer_cite_contourf.py
new file mode 100644
index 0000000000..4e28510e43
--- /dev/null
+++ b/docs/src/userguide/plotting_examples/cube_brewer_cite_contourf.py
@@ -0,0 +1,26 @@
+"""Adding a citation for a plot using iris.plot.citation()."""
+
+import matplotlib.pyplot as plt
+
+import iris
+import iris.plot as iplt
+import iris.quickplot as qplt
+
+fname = iris.sample_data_path("air_temp.pp")
+temperature_cube = iris.load_cube(fname)
+
+# Get the Purples "Brewer" palette.
+brewer_cmap = plt.get_cmap("brewer_Purples_09")
+
+# Draw the contours, with n-levels set for the map colours (9).
+# NOTE: needed as the map is non-interpolated, but matplotlib does not provide
+# any special behaviour for these.
+qplt.contourf(temperature_cube, brewer_cmap.N, cmap=brewer_cmap)
+
+# Add a citation to the plot.
+iplt.citation(iris.plot.BREWER_CITE)
+
+# Add coastlines to the map created by contourf.
+plt.gca().coastlines()
+
+plt.show()
diff --git a/docs/src/userguide/plotting_examples/cube_brewer_contourf.py b/docs/src/userguide/plotting_examples/cube_brewer_contourf.py
new file mode 100644
index 0000000000..94692c924c
--- /dev/null
+++ b/docs/src/userguide/plotting_examples/cube_brewer_contourf.py
@@ -0,0 +1,23 @@
+"""Plot a cube with a Brewer colour palette using iris.quickplot.contourf()."""
+
+import matplotlib.cm as mpl_cm
+import matplotlib.pyplot as plt
+
+import iris
+import iris.quickplot as qplt
+
+fname = iris.sample_data_path("air_temp.pp")
+temperature_cube = iris.load_cube(fname)
+
+# Load a Cynthia Brewer palette.
+brewer_cmap = mpl_cm.get_cmap("brewer_OrRd_09")
+
+# Draw the contours, with n-levels set for the map colours (9).
+# NOTE: needed as the map is non-interpolated, but matplotlib does not provide
+# any special behaviour for these.
+qplt.contourf(temperature_cube, brewer_cmap.N, cmap=brewer_cmap)
+
+# Add coastlines to the map created by contourf.
+plt.gca().coastlines()
+
+plt.show()
diff --git a/docs/src/userguide/plotting_examples/cube_contour.py b/docs/src/userguide/plotting_examples/cube_contour.py
new file mode 100644
index 0000000000..e338d395ff
--- /dev/null
+++ b/docs/src/userguide/plotting_examples/cube_contour.py
@@ -0,0 +1,24 @@
+"""Simple contour plot of a cube.
+
+Can use iris.plot.contour() or iris.quickplot.contour().
+
+"""
+
+import matplotlib.pyplot as plt
+
+import iris
+import iris.quickplot as qplt
+
+fname = iris.sample_data_path("air_temp.pp")
+temperature_cube = iris.load_cube(fname)
+
+# Add a contour, and put the result in a variable called contour.
+contour = qplt.contour(temperature_cube)
+
+# Add coastlines to the map created by contour.
+plt.gca().coastlines()
+
+# Add contour labels based on the contour we have just created.
+plt.clabel(contour, inline=False)
+
+plt.show()
diff --git a/docs/src/userguide/plotting_examples/cube_contourf.py b/docs/src/userguide/plotting_examples/cube_contourf.py
new file mode 100644
index 0000000000..b76645b380
--- /dev/null
+++ b/docs/src/userguide/plotting_examples/cube_contourf.py
@@ -0,0 +1,21 @@
+"""Simple filled contour plot of a cube.
+
+Can use iris.plot.contourf() or iris.quickplot.contourf().
+
+"""
+
+import matplotlib.pyplot as plt
+
+import iris
+import iris.quickplot as qplt
+
+fname = iris.sample_data_path("air_temp.pp")
+temperature_cube = iris.load_cube(fname)
+
+# Draw the contour with 25 levels.
+qplt.contourf(temperature_cube, 25)
+
+# Add coastlines to the map created by contourf.
+plt.gca().coastlines()
+
+plt.show()
diff --git a/docs/src/userguide/plotting_examples/masking_brazil_plot.py b/docs/src/userguide/plotting_examples/masking_brazil_plot.py
new file mode 100644
index 0000000000..d1a75a700f
--- /dev/null
+++ b/docs/src/userguide/plotting_examples/masking_brazil_plot.py
@@ -0,0 +1,25 @@
+"""Global cube masked to Brazil and plotted with quickplot."""
+
+import cartopy.io.shapereader as shpreader
+import matplotlib.pyplot as plt
+
+import iris
+import iris.quickplot as qplt
+from iris.util import mask_cube_from_shapefile
+
+country_shp_reader = shpreader.Reader(
+    shpreader.natural_earth(
+        resolution="110m", category="cultural", name="admin_0_countries"
+    )
+)
+brazil_shp = [
+    country.geometry
+    for country in country_shp_reader.records()
+    if "Brazil" in country.attributes["NAME_LONG"]
+][0]
+
+cube = iris.load_cube(iris.sample_data_path("air_temp.pp"))
+brazil_cube = mask_cube_from_shapefile(cube, brazil_shp)
+
+qplt.pcolormesh(brazil_cube)
+plt.show()
diff --git a/docs/src/userguide/real_and_lazy_data.rst b/docs/src/userguide/real_and_lazy_data.rst
new file mode 100644
index 0000000000..2b3ecf9e64
--- /dev/null
+++ b/docs/src/userguide/real_and_lazy_data.rst
@@ -0,0 +1,275 @@
+
+.. _real_and_lazy_data:
+
+
+.. testsetup:: *
+
+    import dask.array as da
+    import iris
+    from iris.cube import CubeList
+    import numpy as np
+
+
+==================
+Real and Lazy Data
+==================
+
+We have seen in the :doc:`iris_cubes` section of the user guide that
+Iris cubes contain data and metadata about a phenomenon. The data element of a cube
+is always an array, but the array may be either "real" or "lazy".
+
+In this section of the user guide we will look specifically at the concepts of
+real and lazy data as they apply to the cube and other data structures in Iris.
+
+
+What is Real and Lazy Data?
+---------------------------
+
+In Iris, we use the term **real data** to describe data arrays that are loaded
+into memory. Real data is typically provided as a
+`NumPy array `_,
+which has a shape and data type that are used to describe the array's data points.
+Each data point takes up a small amount of memory, which means large NumPy arrays can
+take up a large amount of memory.
+
+Conversely, we use the term **lazy data** to describe data that is not loaded into memory.
+(This is sometimes also referred to as **deferred data**.)
+In Iris, lazy data is provided as a
+`dask array `_.
+A dask array also has a shape and data type,
+but the dask array's data points remain on disk and are only loaded into memory in
+small
+`chunks `__
+when absolutely necessary. This has key performance benefits for
+handling large amounts of data, where both calculation time and storage
+requirements can be significantly reduced.
+
+In Iris, when actual data values are needed from a lazy data array, it is
+*'realised'*: this means that all the actual values are read in from the file,
+and a 'real'
+(i.e. `numpy `_)
+array replaces the lazy array within the Iris object.
+
+Following realisation, the Iris object just contains the actual ('real')
+data, so the time cost of reading all the data is not incurred again.
+From here on, access to the data is fast, but it now occupies its full memory space.
+
+In particular, any direct reference to ``cube.data`` will realise the cube data
+content: any lazy content is lost as the data is read from file, and the cube
+content is replaced with a real array.
+This is also referred to simply as "touching" the data.
+
+See the section :ref:`when_real_data`
+for more examples of this.
+
+You can check whether a cube has real data or lazy data by using the method
+:meth:`~iris.cube.Cube.has_lazy_data`. For example::
+
+    >>> cube = iris.load_cube(iris.sample_data_path('air_temp.pp'))
+    >>> cube.has_lazy_data()
+    True
+    # Realise the lazy data.
+    >>> cube.data
+    >>> cube.has_lazy_data()
+    False
+
+
+Benefits
+--------
+
+The primary advantage of using lazy data is that it enables
+`out-of-core processing `_;
+that is, the loading and manipulating of datasets without loading the full data into memory.
+
+There are two key benefits from this:
+
+**Firstly**, the result of a calculation on a large dataset often occupies much
+less storage space than the source data -- such as, for instance, a maximum data
+value calculated over a large number of datafiles.
+In these cases the result can be computed in sections, without ever requiring the
+entire source dataset to be loaded, thus drastically reducing memory footprint.
+This strategy of task division can also enable reduced execution time through the effective
+use of parallel processing capabilities.
+
+**Secondly**, it is often simply convenient to form a calculation on a large
+dataset, of which only a certain portion is required at any one time
+-- for example, plotting individual timesteps from a large sequence.
+In such cases, a required portion can be extracted and realised without calculating the entire result.
+
+.. _when_real_data:
+
+When Does My Data Become Real?
+------------------------------
+
+Certain operations, such as cube indexing and statistics, can be
+performed in a lazy fashion, producing a 'lazy' result from a lazy input, so
+that no realisation immediately occurs.
+However other operations, such as plotting or printing data values, will always
+trigger the 'realisation' of data.
+
+When you load a dataset using Iris the data array will almost always initially be
+a lazy array. This section details some operations that will realise lazy data
+as well as some operations that will maintain lazy data. We use the term **realise**
+to mean converting lazy data into real data.
+
+Most operations on data arrays can be run equivalently on both real and lazy data.
+If the data array is real then the operation will be run on the data array
+immediately. The results of the operation will be available as soon as processing is completed.
+If the data array is lazy then the operation will be deferred and the data array will
+remain lazy until you request the result (such as when you read from ``cube.data``)::
+
+    >>> cube = iris.load_cube(iris.sample_data_path('air_temp.pp'))
+    >>> cube.has_lazy_data()
+    True
+    >>> cube += 5
+    >>> cube.has_lazy_data()
+    True
+
+The process by which the operation is deferred until the result is requested is
+referred to as **lazy evaluation**.
+
+Certain operations, including regridding and plotting, can only be run on real data.
+Calling such operations on lazy data will automatically realise your lazy data.
+
+You can also realise (and so load into memory) your cube's lazy data if you 'touch' the data.
+To 'touch' the data means directly accessing the data by calling ``cube.data``,
+as in the previous example.
+
+Core Data
+^^^^^^^^^
+
+Cubes have the concept of "core data". This returns the cube's data in its
+current state:
+
+* If a cube has lazy data, calling the cube's :meth:`~iris.cube.Cube.core_data` method
+  will return the cube's lazy dask array.
Calling the cube's
+  :meth:`~iris.cube.Cube.core_data` method **will never realise** the cube's data.
+* If a cube has real data, calling the cube's :meth:`~iris.cube.Cube.core_data` method
+  will return the cube's real NumPy array.
+
+For example::
+
+    >>> cube = iris.load_cube(iris.sample_data_path('air_temp.pp'))
+    >>> cube.has_lazy_data()
+    True
+
+    >>> the_data = cube.core_data()
+    >>> type(the_data)
+    <class 'dask.array.core.Array'>
+    >>> cube.has_lazy_data()
+    True
+
+    # Realise the lazy data.
+    >>> cube.data
+    >>> the_data = cube.core_data()
+    >>> type(the_data)
+    <class 'numpy.ndarray'>
+    >>> cube.has_lazy_data()
+    False
+
+
+Coordinates
+-----------
+
+In the same way that Iris cubes contain a data array, Iris coordinates contain a
+points array and an optional bounds array.
+Coordinate points and bounds arrays can also be real or lazy:
+
+* A :class:`~iris.coords.DimCoord` will only ever have **real** points and bounds
+  arrays because of monotonicity checks that realise lazy arrays.
+* An :class:`~iris.coords.AuxCoord` can have **real or lazy** points and bounds.
+* An :class:`~iris.aux_factory.AuxCoordFactory` (or derived coordinate)
+  can have **real or lazy** points and bounds. If all of the
+  :class:`~iris.coords.AuxCoord` instances used to construct the derived coordinate
+  have real points and bounds then the derived coordinate will have real points
+  and bounds, otherwise the derived coordinate will have lazy points and bounds.
+
+Iris cubes and coordinates have very similar interfaces, which extends to accessing
+coordinates' lazy points and bounds:
+
+.. doctest::
+
+    >>> cube = iris.load_cube(iris.sample_data_path('orca2_votemper.nc'),'votemper')
+
+    >>> dim_coord = cube.coord('depth')
+    >>> print(dim_coord.has_lazy_points())
+    False
+    >>> print(dim_coord.has_bounds())
+    True
+    >>> print(dim_coord.has_lazy_bounds())
+    False
+
+    >>> aux_coord = cube.coord('longitude')
+    >>> print(aux_coord.has_lazy_points())
+    True
+    >>> print(aux_coord.has_bounds())
+    True
+    >>> print(aux_coord.has_lazy_bounds())
+    True
+
+    # Realise the lazy points. This will **not** realise the lazy bounds.
+    >>> points = aux_coord.points
+    >>> print(aux_coord.has_lazy_points())
+    False
+    >>> print(aux_coord.has_lazy_bounds())
+    True
+
+    # Fetch a derived coordinate, from a different file: These can also have lazy data.
+    >>> cube2 = iris.load_cube(iris.sample_data_path('hybrid_height.nc'), 'air_potential_temperature')
+    >>> derived_coord = cube2.coord('altitude')
+    >>> print(derived_coord.has_lazy_points())
+    True
+    >>> print(derived_coord.has_bounds())
+    True
+    >>> print(derived_coord.has_lazy_bounds())
+    True
+
+
+Dask Processing Options
+-----------------------
+
+Iris uses `Dask `_ to provide lazy data arrays for
+both Iris cubes and coordinates, and for computing deferred operations on lazy arrays.
+
+Dask provides processing options to control how deferred operations on lazy arrays
+are computed. This is provided via the ``dask.set_options`` interface. See the
+`dask documentation `_
+for more information on setting dask processing options.
+
+
+.. _delayed_netcdf_save:
+
+Delayed NetCDF Saving
+---------------------
+
+When saving data to NetCDF files, it is possible to *delay* writing lazy content to the
+output file, to be performed by `Dask `_ later,
+thus enabling parallel save operations.
+
+This works in the following way:
+
+1. an :func:`iris.save` call is made, with a NetCDF file output and the additional
+   keyword ``compute=False``.
+   This is currently *only* available when saving to NetCDF, so it is documented in
+   the Iris NetCDF file format API. See: :func:`iris.fileformats.netcdf.save`.
+
+2. the call creates the output file, but does not fill in variables' data, where
+   the data is a lazy array in the Iris object. Instead, these variables are
+   initially created "empty".
+
+3. the :func:`iris.save` call returns a ``result`` which is a
+   :class:`~dask.delayed.Delayed` object.
+
+4. the save can be completed later by calling ``result.compute()``, or by passing it
+   to the :func:`dask.compute` call.
+
+The benefit of this is that costly data transfer operations can be performed in
+parallel with writes to other data files. Also, where array contents are calculated
+from shared lazy input data, these can be computed in parallel efficiently by Dask
+(i.e. without re-fetching), similar to what :meth:`iris.cube.CubeList.realise_data`
+can do.
+
+.. note::
+    This feature does **not** enable parallel writes to the *same* NetCDF output file.
+    That can only be done on certain operating systems, with a specially configured
+    build of the NetCDF C library, and is not supported by Iris at present.
diff --git a/docs/src/userguide/regridding_plots/interpolate_column.py b/docs/src/userguide/regridding_plots/interpolate_column.py
new file mode 100644
index 0000000000..681af0c998
--- /dev/null
+++ b/docs/src/userguide/regridding_plots/interpolate_column.py
@@ -0,0 +1,79 @@
+"""Interpolate using iris.analysis.Linear()."""
+
+import matplotlib.pyplot as plt
+import numpy as np
+
+import iris.analysis
+import iris.quickplot as qplt
+
+fname = iris.sample_data_path("hybrid_height.nc")
+column = iris.load_cube(fname, "air_potential_temperature")[:, 0, 0]
+
+alt_coord = column.coord("altitude")
+
+# Interpolate the "perfect" linear interpolation. Really this is just
+# a high number of interpolation points, in this case 1000 of them.
+altitude_points = [
+    (
+        "altitude",
+        np.linspace(min(alt_coord.points), max(alt_coord.points), 1000),
+    )
+]
+scheme = iris.analysis.Linear()
+linear_column = column.interpolate(altitude_points, scheme)
+
+# Now interpolate the data onto 10 evenly spaced altitude levels,
+# as we did in the example.
+altitude_points = [("altitude", np.linspace(400, 1250, 10))]
+scheme = iris.analysis.Linear()
+new_column = column.interpolate(altitude_points, scheme)
+
+plt.figure(figsize=(5, 4), dpi=100)
+
+# Plot the black markers for the original data.
+qplt.plot(
+    column,
+    marker="o",
+    color="black",
+    linestyle="",
+    markersize=3,
+    label="Original values",
+    zorder=2,
+)
+
+# Plot the gray line to display the linear interpolation.
+qplt.plot(
+    linear_column,
+    color="gray",
+    label="Linear interpolation",
+    zorder=0,
+)
+
+# Plot the red markers for the new data.
+qplt.plot(
+    new_column,
+    marker="D",
+    color="red",
+    linestyle="",
+    label="Interpolated values",
+    zorder=1,
+)
+
+ax = plt.gca()
+# Space the plot such that the labels appear correctly.
+plt.subplots_adjust(left=0.17, bottom=0.14)
+
+# Limit the plot to a maximum of 5 ticks.
+ax.xaxis.get_major_locator().set_params(nbins=5)
+
+# Prevent matplotlib from using "offset" notation on the xaxis.
+ax.xaxis.get_major_formatter().set_useOffset(False)
+
+# Put some space between the line and the axes.
+ax.margins(0.05)
+
+# Place gridlines and a legend.
+ax.grid()
+plt.legend(loc="lower right")
+
+plt.show()
diff --git a/docs/src/userguide/regridding_plots/regridded_to_global.py b/docs/src/userguide/regridding_plots/regridded_to_global.py
new file mode 100644
index 0000000000..8e43f1471a
--- /dev/null
+++ b/docs/src/userguide/regridding_plots/regridded_to_global.py
@@ -0,0 +1,23 @@
+"""Regrid using iris.analysis.Linear()."""
+
+import matplotlib.pyplot as plt
+
+import iris
+import iris.analysis
+import iris.plot as iplt
+
+global_air_temp = iris.load_cube(iris.sample_data_path("air_temp.pp"))
+rotated_psl = iris.load_cube(iris.sample_data_path("rotated_pole.nc"))
+
+scheme = iris.analysis.Linear(extrapolation_mode="mask")
+global_psl = rotated_psl.regrid(global_air_temp, scheme)
+
+plt.figure(figsize=(4, 3))
+iplt.pcolormesh(global_psl)
+plt.title("Air pressure\non a global longitude latitude grid")
+ax = plt.gca()
+ax.coastlines()
+ax.gridlines()
+ax.set_extent([-90, 70, 10, 80])
+
+plt.show()
diff --git a/docs/src/userguide/regridding_plots/regridded_to_global_area_weighted.py b/docs/src/userguide/regridding_plots/regridded_to_global_area_weighted.py
new file mode 100644
index 0000000000..6c906ba87b
--- /dev/null
+++ b/docs/src/userguide/regridding_plots/regridded_to_global_area_weighted.py
@@ -0,0 +1,50 @@
+"""Regrid using iris.analysis.AreaWeighted."""
+
+import matplotlib.colors
+import matplotlib.pyplot as plt
+import numpy as np
+
+import iris
+import iris.analysis
+import iris.plot as iplt
+
+global_air_temp = iris.load_cube(iris.sample_data_path("air_temp.pp"))
+
+regional_ash = iris.load_cube(iris.sample_data_path("NAME_output.txt"))
+regional_ash = regional_ash.collapsed("flight_level", iris.analysis.SUM)
+
+# Mask values so low that they are anomalous.
+regional_ash.data = np.ma.masked_less(regional_ash.data, 5e-6)
+
+norm = matplotlib.colors.LogNorm(5e-6, 0.0175)
+
+global_air_temp.coord("longitude").guess_bounds()
+global_air_temp.coord("latitude").guess_bounds()
+
+fig = plt.figure(figsize=(8, 4.5))
+
+plt.subplot(2, 2, 1)
+iplt.pcolormesh(regional_ash, norm=norm)
+plt.title("Volcanic ash total\nconcentration not regridded", size="medium")
+
+for subplot_num, mdtol in zip([2, 3, 4], [0, 0.5, 1]):
+    plt.subplot(2, 2, subplot_num)
+    scheme = iris.analysis.AreaWeighted(mdtol=mdtol)
+    global_ash = regional_ash.regrid(global_air_temp, scheme)
+    iplt.pcolormesh(global_ash, norm=norm)
+    plt.title(
+        "Volcanic ash total concentration\n"
+        "regridded with AreaWeighted(mdtol={})".format(mdtol),
+        size="medium",
+    )
+
+plt.subplots_adjust(hspace=0, wspace=0.05, left=0.001, right=0.999, bottom=0, top=0.955)
+
+# Iterate over each of the figure's axes, adding coastlines, gridlines
+# and setting the extent.
+for ax in fig.axes: + ax.coastlines("50m") + ax.gridlines() + ax.set_extent([-80, 40, 31, 75]) + +plt.show() diff --git a/docs/src/userguide/regridding_plots/regridded_to_rotated.py b/docs/src/userguide/regridding_plots/regridded_to_rotated.py new file mode 100644 index 0000000000..31afdb7da1 --- /dev/null +++ b/docs/src/userguide/regridding_plots/regridded_to_rotated.py @@ -0,0 +1,23 @@ +"""Rotated pole.""" + +import matplotlib.pyplot as plt + +import iris +import iris.analysis +import iris.plot as iplt + +global_air_temp = iris.load_cube(iris.sample_data_path("air_temp.pp")) +rotated_psl = iris.load_cube(iris.sample_data_path("rotated_pole.nc")) + +rotated_air_temp = global_air_temp.regrid(rotated_psl, iris.analysis.Linear()) + + +plt.figure(figsize=(4, 3)) + +iplt.pcolormesh(rotated_air_temp, norm=plt.Normalize(260, 300)) +plt.title("Air temperature\non a limited area rotated pole grid") +ax = plt.gca() +ax.coastlines(resolution="50m") +ax.gridlines() + +plt.show() diff --git a/docs/src/userguide/regridding_plots/regridding_plot.py b/docs/src/userguide/regridding_plots/regridding_plot.py new file mode 100644 index 0000000000..ed45822a51 --- /dev/null +++ b/docs/src/userguide/regridding_plots/regridding_plot.py @@ -0,0 +1,28 @@ +"""Plot regridded data.""" + +import matplotlib.pyplot as plt + +import iris +import iris.plot as iplt + +# Load the data. +global_air_temp = iris.load_cube(iris.sample_data_path("air_temp.pp")) +rotated_psl = iris.load_cube(iris.sample_data_path("rotated_pole.nc")) + +plt.figure(figsize=(9, 3.5)) + +plt.subplot(1, 2, 1) +iplt.pcolormesh(global_air_temp, norm=plt.Normalize(260, 300)) +plt.title("Air temperature\non a global longitude latitude grid") +ax = plt.gca() +ax.coastlines() +ax.gridlines() + +plt.subplot(1, 2, 2) +iplt.pcolormesh(rotated_psl) +plt.title("Air pressure\non a limited area rotated pole grid") +ax = plt.gca() +ax.coastlines(resolution="50m") +ax.gridlines() + +plt.show() diff --git a/docs/iris/src/userguide/saving_iris_cubes.rst b/docs/src/userguide/saving_iris_cubes.rst similarity index 81% rename from docs/iris/src/userguide/saving_iris_cubes.rst rename to docs/src/userguide/saving_iris_cubes.rst index ecf2210810..2ffc8c47d3 100644 --- a/docs/iris/src/userguide/saving_iris_cubes.rst +++ b/docs/src/userguide/saving_iris_cubes.rst @@ -1,13 +1,13 @@ .. _saving_iris_cubes: ================== -Saving Iris cubes +Saving Iris Cubes ================== Iris supports the saving of cubes and cube lists to: -* CF netCDF (1.5) -* GRIB (edition 2) +* CF netCDF (version 1.7) +* GRIB edition 2 (if `iris-grib `_ is installed) * Met Office PP @@ -30,54 +30,72 @@ and the keyword argument `saver` is not required. .. code-block:: python - cube = iris.load_cube('somefile.nc') + cube = iris.load_cube("somefile.nc") # The next line causes data loss in 'somefile.nc' and the cube. - iris.save(cube, 'somefile.nc') + iris.save(cube, "somefile.nc") In general, overwriting a file which is the source for any lazily loaded data can result in corruption. Users should proceed with caution when attempting to overwrite an existing file. -Controlling the save process ------------------------------ +Controlling the Save Process +---------------------------- The :py:func:`iris.save` function passes all other keywords through to the saver function defined, or automatically set from the file extension. This enables saver specific functionality to be called. +.. 
doctest::

    >>> # Save a cube to PP
    >>> iris.save(cubes[0], "myfile.pp")
    >>> # Save a cube list to a PP file, appending to the contents of the file
    >>> # if it already exists
    >>> iris.save(cubes, "myfile.pp", append=True)
+
    >>> # Save a cube to netCDF, defaults to NETCDF4 file format
    >>> iris.save(cubes[0], "myfile.nc")
    >>> # Save a cube list to netCDF, using the NETCDF3_CLASSIC storage option
    >>> iris.save(cubes, "myfile.nc", netcdf_format="NETCDF3_CLASSIC")
 
+.. testcleanup::
+
+    import pathlib
+    p = pathlib.Path("myfile.pp")
+    if p.exists():
+        p.unlink()
+    p = pathlib.Path("myfile.nc")
+    if p.exists():
+        p.unlink()
+
 See
 
 * :py:func:`iris.fileformats.netcdf.save`
-* :py:func:`iris.fileformats.grib.save_grib2`
 * :py:func:`iris.fileformats.pp.save`
 
 for more details on supported arguments for the individual savers.
 
+.. note::
+
+    The existence of a keyword argument for one saver does not guarantee the
+    same works for all savers. For example, it isn't possible to pass an
+    ``append`` keyword argument to the netCDF saver (see :ref:`netcdf_save`).
+
-Customising the save process
------------------------------
+Customising the Save Process
+----------------------------
 
 When saving to GRIB or PP, the save process may be intercepted between the
 translation step and the file writing. This enables customisation of the
 output messages, based on Cube metadata if required, over and above the
 translations supplied by Iris.
 
 For example, a GRIB2 message with a particular known long_name may need to be
 saved to a specific parameter code and type of statistical process. This can
 be achieved by::
 
     def tweaked_messages(cube):
-        for cube, grib_message in iris.fileformats.grib.as_pairs(cube):
+        for cube, grib_message in iris_grib.save_pairs_from_cube(cube):
             # post process the GRIB2 message, prior to saving
             if cube.name() == 'carefully_customised_precipitation_amount':
                 gribapi.grib_set_long(grib_message, "typeOfStatisticalProcess", 1)
                 gribapi.grib_set_long(grib_message, "parameterCategory", 1)
                 gribapi.grib_set_long(grib_message, "parameterNumber", 1)
             yield grib_message
-    iris.fileformats.grib.save_messages(tweaked_messages(cubes[0]), '/tmp/agrib2.grib2')
+    iris_grib.save_messages(tweaked_messages(cubes[0]), '/tmp/agrib2.grib2')
 
 Similarly a PP field may need to be written out with a specific value for LBEXP.
 This can be achieved by::
 
@@ -91,15 +109,16 @@ Similarly a PP field may need to be written out with a specific value for LBEXP.
         yield field
     iris.fileformats.pp.save_fields(tweaked_fields(cubes[0]), '/tmp/app.pp')
 
+.. _netcdf_save:
 
-netCDF
-^^^^^^^
+NetCDF
+^^^^^^
 
 NetCDF is a flexible container for metadata, and cube metadata is closely related
 to the CF conventions for netCDF semantics. This means that cube metadata is well
 represented in netCDF files, closely resembling the in-memory metadata
 representation. Thus there is no provision for similar save customisation
 functionality for netCDF saving; all customisations should be applied to the
 cube prior to saving to netCDF.
 
 Bespoke Saver
---------------
+-------------
 
 A bespoke saver may be written to support an alternative file format. This can
 be provided to the :py:func:`iris.save` function, enabling Iris to write to a
 different file format. Such a custom saver will need to be written to meet the
 needs of the file format and to handle the metadata translation from cube
 metadata effectively.
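
As an illustration, a minimal sketch of a bespoke saver follows; the
``save_as_csv`` function and the output filename are hypothetical, but the
``saver`` keyword of :py:func:`iris.save` does accept such a function::

    import numpy as np

    import iris

    def save_as_csv(cube, target):
        # Hypothetical bespoke saver: write "coordinate,value" rows for a 1D cube.
        coord = cube.coords(dim_coords=True)[0]
        rows = np.column_stack([coord.points, cube.data])
        np.savetxt(target, rows, delimiter=',',
                   header='%s,%s' % (coord.name(), cube.name()))

    cube = iris.load_cube(iris.sample_data_path('air_temp.pp'))
    # Save a single latitude row of the data via the bespoke saver.
    iris.save(cube[0, :], 'air_temp_row.csv', saver=save_as_csv)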
diff --git a/docs/src/userguide/subsetting_a_cube.rst b/docs/src/userguide/subsetting_a_cube.rst
new file mode 100644
index 0000000000..7440d22adc
--- /dev/null
+++ b/docs/src/userguide/subsetting_a_cube.rst
@@ -0,0 +1,488 @@
+.. include:: ../common_links.inc
+
+.. _subsetting_a_cube:
+
+=================
+Subsetting a Cube
+=================
+
+The :doc:`loading_iris_cubes` section of the user guide showed how to load data into multidimensional Iris cubes.
+However, it is often necessary to reduce the dimensionality of a cube to something more appropriate and/or manageable,
+or to examine and analyse only a subset of the data in a dimension.
+
+Iris provides several ways of reducing the amount of data and/or the number of dimensions in your cube, depending on
+the circumstances. In all cases **the subset of a valid cube is itself a valid cube**.
+
+
+.. seealso::
+
+    Relevant gallery examples:
+        - :ref:`sphx_glr_generated_gallery_general_plot_polynomial_fit.py` (Slices)
+        - :ref:`sphx_glr_generated_gallery_general_plot_anomaly_log_colouring.py` (Extraction)
+
+.. _cube_extraction:
+
+Cube Extraction
+---------------
+A subset of a cube can be "extracted" from a multi-dimensional cube in order to reduce its dimensionality:
+
+    >>> import iris
+    >>> filename = iris.sample_data_path('space_weather.nc')
+    >>> cube = iris.load_cube(filename, 'electron density')
+    >>> equator_slice = cube.extract(iris.Constraint(grid_latitude=0))
+    >>> print(equator_slice)
+    electron density / (1E11 e/m^3)     (height: 29; grid_longitude: 31)
+        Dimension coordinates:
+            height                          x                  -
+            grid_longitude                  -                  x
+        Auxiliary coordinates:
+            latitude                        -                  x
+            longitude                       -                  x
+        Scalar coordinates:
+            grid_latitude               0.0 degrees
+        Attributes:
+            Conventions                 'CF-1.5'
+
+
+In this example we start with a 3 dimensional cube, with dimensions of ``height``, ``grid_latitude`` and ``grid_longitude``,
+and use :class:`iris.Constraint` to extract every point where the latitude is 0, resulting in a 2d cube with axes of
+``height`` and ``grid_longitude``.
+
+.. _floating-point-warning:
+.. warning::
+
+    Caution is required when using equality constraints with floating point coordinates such as ``grid_latitude``.
+    Printing the points of a coordinate does not necessarily show the full precision of the underlying number, and it
+    is very easy to return no matches to a constraint when one was expected.
+    This can be avoided by using a function as the argument to the constraint::
+
+        def near_zero(cell):
+            """Return True if the cell is between -0.1 and 0.1."""
+            return -0.1 < cell < 0.1
+
+        equator_constraint = iris.Constraint(grid_latitude=near_zero)
+
+    Often you will see this construct in shorthand using a lambda function definition::
+
+        equator_constraint = iris.Constraint(grid_latitude=lambda cell: -0.1 < cell < 0.1)
+
+
+The extract method could be applied again to the *equator_slice* cube to get a further subset.
+
+For example, to get a ``height`` of 9000 metres at the equator, the following line extends the previous example::
+
+    equator_height_9km_slice = equator_slice.extract(iris.Constraint(height=9000))
+    print(equator_height_9km_slice)
+
+The two steps required to get a ``height`` of 9000 m at the equator can be simplified into a single constraint::
+
+    equator_height_9km_slice = cube.extract(iris.Constraint(grid_latitude=0, height=9000))
+    print(equator_height_9km_slice)
+
+Alternatively, constraints can be combined using ``&``::
+
+    cube = iris.load_cube(filename, 'electron density')
+    equator_constraint = iris.Constraint(grid_latitude=0)
+    height_constraint = iris.Constraint(height=9000)
+    equator_height_9km_slice = cube.extract(equator_constraint & height_constraint)
+
+.. note::
+
+    Whilst ``&`` is supported, the ``|`` that might reasonably be expected is
+    not. An explanation of why is given in the :class:`iris.Constraint`
+    reference documentation.
+
+    For an example of constraining to multiple ranges of the same coordinate to
+    generate one cube, see the :class:`iris.Constraint` reference documentation.
+
+A common requirement is to limit the value of a coordinate to a specific range;
+this can be achieved by passing the constraint a function::
+
+    def below_9km(cell):
+        # return True or False as to whether the cell in question should be kept
+        return cell <= 9000
+
+    cube = iris.load_cube(filename, 'electron density')
+    height_below_9km = iris.Constraint(height=below_9km)
+    below_9km_slice = cube.extract(height_below_9km)
+
+As we saw in :doc:`loading_iris_cubes`, the result of :func:`iris.load` is a :class:`CubeList `.
+The ``extract`` method also exists on a :class:`CubeList ` and behaves in exactly the
+same way as loading with constraints:
+
+    >>> import iris
+    >>> air_temp_and_fp_6 = iris.Constraint('air_potential_temperature', forecast_period=6)
+    >>> level_10 = iris.Constraint(model_level_number=10)
+    >>> filename = iris.sample_data_path('uk_hires.pp')
+    >>> cubes = iris.load(filename).extract(air_temp_and_fp_6 & level_10)
+    >>> print(cubes)
+    0: air_potential_temperature / (K)     (grid_latitude: 204; grid_longitude: 187)
+    >>> print(cubes[0])
+    air_potential_temperature / (K)     (grid_latitude: 204; grid_longitude: 187)
+        Dimension coordinates:
+            grid_latitude                   x                    -
+            grid_longitude                  -                    x
+        Auxiliary coordinates:
+            surface_altitude                x                    x
+        Derived coordinates:
+            altitude                        x                    x
+        Scalar coordinates:
+            forecast_period             6.0 hours
+            forecast_reference_time     2009-11-19 04:00:00
+            level_height                395.0 m, bound=(360.0, 433.3332) m
+            model_level_number          10
+            sigma                       0.9549927, bound=(0.9589389, 0.95068014)
+            time                        2009-11-19 10:00:00
+        Attributes:
+            STASH                       m01s00i004
+            source                      'Data from Met Office Unified Model'
+            um_version                  '7.3'
+
+Cube attributes can also be part of the constraint criteria. If a cube
+attribute of ``STASH`` exists, as is the case when loading ``PP`` files,
+then specific STASH codes can be filtered::
+
+    filename = iris.sample_data_path('uk_hires.pp')
+    level_10_with_stash = iris.AttributeConstraint(STASH='m01s00i004') & iris.Constraint(model_level_number=10)
+    cubes = iris.load(filename).extract(level_10_with_stash)
+
+.. seealso::
+
+    For advanced usage there are further examples in the
+    :class:`iris.Constraint` reference documentation.
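+
+Constraints also need not target coordinate values at all: the ``cube_func``
+keyword of :class:`iris.Constraint` accepts a callable which is given each
+candidate cube and returns True or False. A minimal sketch, reusing the
+``uk_hires.pp`` sample data from above (the kelvin test is purely
+illustrative)::
+
+    import iris
+
+    # Keep only cubes whose units are kelvin.
+    kelvin_only = iris.Constraint(cube_func=lambda cube: cube.units == "K")
+    cubes = iris.load(iris.sample_data_path('uk_hires.pp'), kelvin_only)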
+ +Constraining a Circular Coordinate Across its Boundary +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Occasionally you may need to constrain your cube with a region that crosses the +boundary of a circular coordinate (this is often the meridian or the dateline / +antimeridian). An example use-case of this is to extract the entire Pacific Ocean +from a cube whose longitudes are bounded by the dateline. + +This functionality cannot be provided reliably using constraints. Instead you should use the +functionality provided by :meth:`cube.intersection ` +to extract this region. + + +.. _using-time-constraints: + +Constraining on Time +^^^^^^^^^^^^^^^^^^^^ +Iris follows NetCDF-CF rules in representing time coordinate values as normalised, +purely numeric, values which are normalised by the calendar specified in the coordinate's +units (e.g. "days since 1970-01-01"). +However, when constraining by time we usually want to test calendar-related +aspects such as hours of the day or months of the year, so Iris +provides special features to facilitate this. + +Firstly, when Iris evaluates :class:`iris.Constraint` expressions, it will convert +time-coordinate values (points and bounds) from numbers into :class:`~datetime.datetime`-like +objects for ease of calendar-based testing. + + >>> filename = iris.sample_data_path('uk_hires.pp') + >>> cube_all = iris.load_cube(filename, 'air_potential_temperature') + >>> print('All times :\n' + str(cube_all.coord('time'))) + All times : + DimCoord : time / (hours since 1970-01-01 00:00:00, standard calendar) + points: [2009-11-19 10:00:00, 2009-11-19 11:00:00, 2009-11-19 12:00:00] + shape: (3,) + dtype: float64 + standard_name: 'time' + >>> # Define a function which accepts a datetime as its argument (this is simplified in later examples). + >>> hour_11 = iris.Constraint(time=lambda cell: cell.point.hour == 11) + >>> cube_11 = cube_all.extract(hour_11) + >>> print('Selected times :\n' + str(cube_11.coord('time'))) + Selected times : + DimCoord : time / (hours since 1970-01-01 00:00:00, standard calendar) + points: [2009-11-19 11:00:00] + shape: (1,) + dtype: float64 + standard_name: 'time' + +Secondly, the :class:`iris.time` module provides flexible time comparison +facilities. An :class:`iris.time.PartialDateTime` object can be compared to +objects such as :class:`datetime.datetime` instances, and this comparison will +then test only those 'aspects' which the PartialDateTime instance defines: + + >>> import datetime + >>> from iris.time import PartialDateTime + >>> dt = datetime.datetime(2011, 3, 7) + >>> print(dt > PartialDateTime(year=2010, month=6)) + True + >>> print(dt > PartialDateTime(month=6)) + False + +These two facilities can be combined to provide straightforward calendar-based +time selections when loading or extracting data. + +The previous constraint example can now be written as: + + >>> the_11th_hour = iris.Constraint(time=iris.time.PartialDateTime(hour=11)) + >>> print(iris.load_cube( + ... iris.sample_data_path('uk_hires.pp'), + ... 'air_potential_temperature' & the_11th_hour).coord('time')) + DimCoord : time / (hours since 1970-01-01 00:00:00, standard calendar) + points: [2009-11-19 11:00:00] + shape: (1,) + dtype: float64 + standard_name: 'time' + +It is common that a cube will need to be constrained between two given dates. +In the following example we construct a time sequence representing the first +day of every week for many years: + +.. 
testsetup:: timeseries_range + + import datetime + import numpy as np + from iris.time import PartialDateTime + long_ts = iris.cube.Cube(np.arange(150), long_name='data', units='1') + _mondays = iris.coords.DimCoord(7 * np.arange(150), standard_name='time', units='days since 2007-04-09') + long_ts.add_dim_coord(_mondays, 0) + + +.. doctest:: timeseries_range + :options: +NORMALIZE_WHITESPACE, +ELLIPSIS + + >>> print(long_ts.coord('time')) + DimCoord : time / (days since 2007-04-09, standard calendar) + points: [ + 2007-04-09 00:00:00, 2007-04-16 00:00:00, ..., + 2010-02-08 00:00:00, 2010-02-15 00:00:00] + shape: (150,) + dtype: int64 + standard_name: 'time' + +Given two dates in datetime format, we can select all points between them. +Instead of constraining at loaded time, we already have the time coord so +we constrain that coord using :class:`iris.cube.Cube.extract` + +.. doctest:: timeseries_range + :options: +NORMALIZE_WHITESPACE, +ELLIPSIS + + >>> d1 = datetime.datetime.strptime('20070715T0000Z', '%Y%m%dT%H%MZ') + >>> d2 = datetime.datetime.strptime('20070825T0000Z', '%Y%m%dT%H%MZ') + >>> st_swithuns_daterange_07 = iris.Constraint( + ... time=lambda cell: d1 <= cell.point < d2) + >>> within_st_swithuns_07 = long_ts.extract(st_swithuns_daterange_07) + >>> print(within_st_swithuns_07.coord('time')) + DimCoord : time / (days since 2007-04-09, standard calendar) + points: [ + 2007-07-16 00:00:00, 2007-07-23 00:00:00, 2007-07-30 00:00:00, + 2007-08-06 00:00:00, 2007-08-13 00:00:00, 2007-08-20 00:00:00] + shape: (6,) + dtype: int64 + standard_name: 'time' + +Alternatively, we may rewrite this using :class:`iris.time.PartialDateTime` +objects. + +.. doctest:: timeseries_range + :options: +NORMALIZE_WHITESPACE, +ELLIPSIS + + >>> pdt1 = PartialDateTime(year=2007, month=7, day=15) + >>> pdt2 = PartialDateTime(year=2007, month=8, day=25) + >>> st_swithuns_daterange_07 = iris.Constraint( + ... time=lambda cell: pdt1 <= cell.point < pdt2) + >>> within_st_swithuns_07 = long_ts.extract(st_swithuns_daterange_07) + >>> print(within_st_swithuns_07.coord('time')) + DimCoord : time / (days since 2007-04-09, standard calendar) + points: [ + 2007-07-16 00:00:00, 2007-07-23 00:00:00, 2007-07-30 00:00:00, + 2007-08-06 00:00:00, 2007-08-13 00:00:00, 2007-08-20 00:00:00] + shape: (6,) + dtype: int64 + standard_name: 'time' + +A more complex example might require selecting points over an annually repeating +date range. We can select points within a certain part of the year, in this case +between the 15th of July through to the 25th of August. By making use of +PartialDateTime this becomes simple: + +.. doctest:: timeseries_range + + >>> st_swithuns_daterange = iris.Constraint( + ... time=lambda cell: PartialDateTime(month=7, day=15) <= cell.point < PartialDateTime(month=8, day=25)) + >>> within_st_swithuns = long_ts.extract(st_swithuns_daterange) + ... 
+
+    >>> # Note: using summary(max_values) to show more of the points
+    >>> print(within_st_swithuns.coord('time').summary(max_values=100))
+    DimCoord :  time / (days since 2007-04-09, standard calendar)
+        points: [
+            2007-07-16 00:00:00, 2007-07-23 00:00:00, 2007-07-30 00:00:00,
+            2007-08-06 00:00:00, 2007-08-13 00:00:00, 2007-08-20 00:00:00,
+            2008-07-21 00:00:00, 2008-07-28 00:00:00, 2008-08-04 00:00:00,
+            2008-08-11 00:00:00, 2008-08-18 00:00:00, 2009-07-20 00:00:00,
+            2009-07-27 00:00:00, 2009-08-03 00:00:00, 2009-08-10 00:00:00,
+            2009-08-17 00:00:00, 2009-08-24 00:00:00]
+        shape: (17,)
+        dtype: int64
+        standard_name: 'time'
+
+Notice how the dates printed fall within the range specified by
+``st_swithuns_daterange``, and that they span multiple years.
+
+The above examples involve constraining on the points of the time coordinate. Constraining
+on bounds can be done in the following way::
+
+    filename = iris.sample_data_path('ostia_monthly.nc')
+    cube = iris.load_cube(filename, 'surface_temperature')
+    dtmin = datetime.datetime(2008, 1, 1)
+    cube.extract(iris.Constraint(time=lambda cell: any(bound > dtmin for bound in cell.bound)))
+
+The above example constrains to cells where either the upper or lower bound occurs
+after 1st January 2008.
+
+Cube Masking
+------------
+
+.. _masking-from-shapefile:
+
+Masking from a Shapefile
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+Often we want to perform some kind of analysis over a complex geographical feature e.g.,
+
+- over only land/sea points
+- over a continent, country, or list of countries
+- over a river watershed or lake basin
+- over states or administrative regions of a country
+
+These geographical features can often be described by `ESRI Shapefiles`_, a file format
+first developed for GIS software in the 1990s. `Natural Earth`_ maintains a large, freely
+usable database of shapefiles of many geographical and political divisions, accessible
+via `cartopy`_. Users may also provide their own custom shapefiles for `cartopy`_ to
+load, or their own underlying geometry in the same format as a shapefile geometry.
+
+These shapefiles can be used to mask an Iris cube, so that any data outside the bounds
+of the shapefile is hidden from further analysis or plotting.
+
+First, we load the appropriate shapefile from Natural Earth by following the
+`Cartopy_shapereader`_ instructions. Here we get one for Brazil. The ``.geometry``
+attribute of each record in the reader contains the `Shapely`_ polygon we're interested
+in. These hold the coordinates that define the polygon (or set of lines) being masked;
+once we have them, we just need to provide them to the
+:func:`iris.util.mask_cube_from_shapefile` function. This returns a copy of the cube
+with a :class:`numpy.ma.MaskedArray` as the data payload, where the data outside the
+shape is hidden by the masked array. We can see this in the following example.
+
+
+.. plot:: userguide/plotting_examples/masking_brazil_plot.py
+   :include-source:
+
+We can see that the dimensions of the cube haven't changed - the plot is still global.
+But only the data over Brazil is plotted - the rest has been masked out.
+
+.. note::
+    While Iris will try to dynamically adjust the shapefile to mask cubes of different
+    projections, it can struggle with rotated pole projections and cubes with meridians
+    not at 0°. If this function unexpectedly returns a fully masked cube, converting
+    your cube's coordinate system may help.
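+
+A minimal end-to-end sketch of the recipe above. It assumes that the Natural
+Earth country records carry a ``NAME`` attribute (attribute names vary between
+shapefiles), and that the function accepts the cube and a shapely geometry as
+described above::
+
+    import cartopy.io.shapereader as shpreader
+    import iris
+    import iris.util
+
+    # Locate the Natural Earth countries shapefile via cartopy.
+    shapefile = shpreader.natural_earth(resolution='110m',
+                                        category='cultural',
+                                        name='admin_0_countries')
+    # Take the shapely geometry of the record describing Brazil.
+    brazil = next(record.geometry
+                  for record in shpreader.Reader(shapefile).records()
+                  if record.attributes['NAME'] == 'Brazil')
+
+    cube = iris.load_cube(iris.sample_data_path('air_temp.pp'))
+    masked_cube = iris.util.mask_cube_from_shapefile(cube, brazil)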
+
+
+Cube Iteration
+--------------
+It is not possible to directly iterate over an Iris cube. That is, you cannot use code such as
+``for x in cube:``. However, you can iterate over cube slices, as this section details.
+
+A useful way of dealing with a Cube in its **entirety** is by iterating over its layers or slices.
+For example, to deal with a 3 dimensional cube (z,y,x) you could iterate over all 2 dimensional slices in y and x
+which make up the full 3d cube::
+
+    import iris
+    filename = iris.sample_data_path('hybrid_height.nc')
+    cube = iris.load_cube(filename)
+    print(cube)
+    for yx_slice in cube.slices(['grid_latitude', 'grid_longitude']):
+        print(repr(yx_slice))
+
+As the original cube had the shape (15, 100, 100) there were 15 latitude-longitude slices and hence the
+line ``print(repr(yx_slice))`` was run 15 times.
+
+.. note::
+
+    The order of latitude and longitude in the list is important; had they been swapped the resultant cube slices
+    would have been transposed.
+
+    For further information see :py:meth:`Cube.slices `.
+
+
+This method can handle n-dimensional slices by providing more or fewer coordinate names in the list to **slices**::
+
+    import iris
+    filename = iris.sample_data_path('hybrid_height.nc')
+    cube = iris.load_cube(filename)
+    print(cube)
+    for i, x_slice in enumerate(cube.slices(['grid_longitude'])):
+        print(i, repr(x_slice))
+
+The Python function :py:func:`enumerate` is used in this example to provide an incrementing variable **i** which is
+printed with the summary of each cube slice. Note that there were 1500 1d longitude cubes as a result of
+slicing the 3 dimensional cube (15, 100, 100) by longitude (i starts at 0 and 1500 = 15 * 100).
+
+.. hint::
+    It is often useful to get a single 2d slice from a multidimensional cube in order to develop a 2d plot function, for example.
+    This can be achieved by using the ``next()`` function on the result of
+    slices::
+
+        first_slice = next(cube.slices(['grid_latitude', 'grid_longitude']))
+
+    Once your code can handle a 2d slice, it is then an easy step to loop over **all** 2d slices within the bigger
+    cube using the slices method.
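+
+A complementary method worth knowing about (not covered above) is
+:py:meth:`Cube.slices_over <iris.cube.Cube.slices_over>`, which instead
+iterates over each value of the *named* coordinate, yielding the sub-cube
+spanning all the other dimensions. A short sketch, using the same sample data
+as above::
+
+    import iris
+    filename = iris.sample_data_path('hybrid_height.nc')
+    cube = iris.load_cube(filename)
+    # One 2d (grid_latitude, grid_longitude) sub-cube per model level --
+    # for this 3d cube, equivalent to slicing over the remaining two
+    # coordinates with cube.slices(['grid_latitude', 'grid_longitude']).
+    for level_cube in cube.slices_over('model_level_number'):
+        print(repr(level_cube))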
+
+.. _cube_indexing:
+
+Cube Indexing
+-------------
+In the same way that you would expect a numeric multidimensional array to be **indexed** to take a subset of your
+original array, you can **index** a Cube for the same purpose.
+
+
+Here are some examples of array indexing in :py:mod:`numpy`::
+
+    import numpy as np
+    # create an array of 12 consecutive integers starting from 0
+    a = np.arange(12)
+    print(a)
+
+    print(a[0])     # first element of the array
+
+    print(a[-1])    # last element of the array
+
+    print(a[0:4])   # first four elements of the array (the same as a[:4])
+
+    print(a[-4:])   # last four elements of the array
+
+    print(a[::-1])  # gives all of the array, but backwards
+
+    # Make a 2d array by reshaping a
+    b = a.reshape(3, 4)
+    print(b)
+
+    print(b[0, 0])  # first element of the first and second dimensions
+
+    print(b[0])     # first element of the first dimension (+ every other dimension)
+
+    # get the second element of the first dimension and all of the second dimension
+    # in reverse, by steps of two.
+    print(b[1, ::-2])
+
+
+Similarly, Iris cubes have indexing capability::
+
+    import iris
+    filename = iris.sample_data_path('hybrid_height.nc')
+    cube = iris.load_cube(filename)
+
+    print(cube)
+
+    # get the first element of the first dimension (+ every other dimension)
+    print(cube[0])
+
+    # get the last element of the first dimension (+ every other dimension)
+    print(cube[-1])
+
+    # get the first 4 elements of the first dimension (+ every other dimension)
+    print(cube[0:4])
+
+    # Get the first element of the first and third dimension (+ every other dimension)
+    print(cube[0, :, 0])
+
+    # Get the second element of the first dimension and all of the second dimension
+    # in reverse, by steps of two.
+    print(cube[1, ::-2])
+
+
+.. _Cartopy_shapereader: https://scitools.org.uk/cartopy/docs/latest/tutorials/using_the_shapereader.html#id1
+.. _Natural Earth: https://www.naturalearthdata.com/
+.. _ESRI Shapefiles: https://support.esri.com/en-us/technical-paper/esri-shapefile-technical-description-279
+
+
diff --git a/docs/src/voted_issues.rst b/docs/src/voted_issues.rst
new file mode 100644
index 0000000000..116c997f33
--- /dev/null
+++ b/docs/src/voted_issues.rst
@@ -0,0 +1,61 @@
+.. include:: common_links.inc
+
+.. _voted_issues_top:
+
+Voted Issues
+============
+
+You can help us to prioritise development of new features by leaving a 👍
+reaction on the header (not subsequent comments) of any issue.
+
+.. tip:: We suggest you subscribe to the issue so you will be updated.
+         When viewing the issue there is a **Notifications**
+         section where you can select to subscribe.
+
+Below is a sorted table of all issues that have 1 or more 👍 from our GitHub
+project. Please note that there is more development activity than is shown in
+the table below.
+
+.. _voted-issues.json: https://github.com/scitools/voted_issues/blob/main/voted-issues.json
+
+.. raw:: html
+
+   <!-- Sorted table of voted issues, with columns 👍 | Issue | Author | Title,
+        populated dynamically from voted-issues.json by the page's JavaScript
+        (markup omitted here). -->
    + + +.. note:: The data in this table is updated every 30 minutes and is sourced + from `voted-issues.json`_. + For the latest data please see the `issues on GitHub`_. diff --git a/docs/iris/src/whatsnew/1.0.rst b/docs/src/whatsnew/1.0.rst similarity index 83% rename from docs/iris/src/whatsnew/1.0.rst rename to docs/src/whatsnew/1.0.rst index 2a415c1bfe..a2456c12db 100644 --- a/docs/iris/src/whatsnew/1.0.rst +++ b/docs/src/whatsnew/1.0.rst @@ -1,10 +1,7 @@ -What's new in Iris 1.0 -********************** +v1.0 (17 Oct 2012) +****************** -:Release: 1.0.0 -:Date: 15 Oct, 2012 - -This document explains the new/changed features of Iris in version 1.0. +This document explains the changes made to Iris for this release (:doc:`View all changes `.) With the release of Iris 1.0, we have broadly completed the transition @@ -13,7 +10,7 @@ work. Following this release we plan to deliver significant performance improvements and additional features. -The role of 1.x +The Role of 1.x =============== The 1.x series of releases is intended to provide a relatively stable, @@ -28,48 +25,44 @@ to formalise their data model reach maturity, they will be included in Iris where significant backwards-compatibility can be maintained. -Iris 1.0 features -================= +Features +======== A summary of the main features added with version 1.0: * Hybrid-pressure vertical coordinates, and the ability to load from GRIB. + * Initial support for CF-style coordinate systems. + * Use of Cartopy for mapping in matplotlib. + * Load data from NIMROD files. + * Availability of Cynthia Brewer colour palettes. + * Add a citation to a plot. + * Ensures netCDF files are properly closed. + * The ability to bypass merging when loading data. + * Save netCDF files with an unlimited dimension. + * A more explicit set of load functions, which also allow the automatic cube merging to be bypassed as a last resort. + * The ability to project a cube with a lat-lon or rotated lat-lon coordinate system into a range of map projections e.g. Polar Stereographic. - -Incompatible changes --------------------- -* The "source" and "history" metadata are now represented as Cube - attributes, where previously they used coordinates. -* :meth:`iris.cube.Cube.coord_dims()` now returns a tuple instead of a list. -* The ``iris.plot.gcm`` and ``iris.plot.map_setup`` functions are now removed. - See :ref:`whats-new-cartopy` for further details. - -Deprecations ------------- -* The methods :meth:`iris.coords.Coord.cos()` and - :meth:`iris.coords.Coord.sin()` have been deprecated. -* The :func:`iris.load_strict()` function has been deprecated. Code - should now use the :func:`iris.load_cube()` and - :func:`iris.load_cubes()` functions instead. +* Cube summaries are now more readable when the scalar coordinates + contain bounds. -CF-netCDF coordinate systems -============================ +CF-NetCDF Coordinate Systems +---------------------------- The coordinate systems in Iris are now defined by the CF-netCDF -`grid mappings `_. +`grid mappings `_. As of Iris 1.0 a subset of the CF-netCDF coordinate systems are supported, but this will be expanded in subsequent versions. Adding this code is a relatively simple, incremental process - it would make a @@ -79,27 +72,27 @@ contributing to the project. 
The coordinate systems available in Iris 1.0 and their corresponding Iris classes are: -================================================================================================== ========================================= -CF name Iris class -================================================================================================== ========================================= -`Latitude-longitude `_ :class:`~iris.coord_systems.GeogCS` -`Rotated pole `_ :class:`~iris.coord_systems.RotatedGeogCS` -`Transverse Mercator `_ :class:`~iris.coord_systems.TransverseMercator` -================================================================================================== ========================================= +================================================================================================================= ========================================= +CF Name Iris Class +================================================================================================================= ========================================= +`Latitude-longitude `_ :class:`~iris.coord_systems.GeogCS` +`Rotated pole `_ :class:`~iris.coord_systems.RotatedGeogCS` +`Transverse Mercator `_ :class:`~iris.coord_systems.TransverseMercator` +================================================================================================================= ========================================= For convenience, Iris also includes the :class:`~iris.coord_systems.OSGB` class which provides a simple way to create the transverse Mercator coordinate system used by the British -`Ordnance Survey `_. +`Ordnance Survey `_. .. _whats-new-cartopy: -Using Cartopy for mapping in matplotlib -======================================= +Using Cartopy for Mapping in Matplotlib +--------------------------------------- The underlying map drawing package has now been updated to use -`Cartopy `_. Cartopy provides a +`Cartopy `_. Cartopy provides a highly flexible set of mapping tools, with a consistent, intuitive interface. As yet it doesn't have feature-parity with basemap, but its goal is to make maps "just work", making it the perfect complement to Iris. @@ -139,32 +132,30 @@ interface: and :func:`matplotlib.pyplot.gca` should be used instead. For more examples of what can be done with Cartopy, see the Iris gallery and -`Cartopy's documentation `_. +`Cartopy's documentation `_. -Hybrid-pressure -=============== +Hybrid-Pressure +--------------- With the introduction of the :class:`~iris.aux_factory.HybridPressureFactory` class, it is now possible to represent data expressed on a -hybrid-pressure vertical coordinate, as defined by the second variant in -`Appendix D `_. +`hybrid-pressure vertical coordinate `_. A hybrid-pressure factory is created with references to the coordinates which provide the components of the hybrid coordinate ("ap" and "b") and the surface pressure. In return, it provides a virtual "pressure" coordinate whose values are derived from the given components. This facility is utilised by the GRIB2 loader to automatically provide -the derived "pressure" coordinate for certain data [#f1]_ from the -`ECMWF `_. +the derived "pressure" coordinate for certain data [#f1]_ from the ECMWF. .. [#f1] Where the level type is either 105 or 119, and where the surface pressure has an ECMWF paramId of - `152 `_. + `152 `_. NetCDF -====== +------ When saving a Cube to a netCDF file, Iris will now define the outermost dimension as an unlimited/record dimension. 
In combination with the @@ -189,8 +180,8 @@ dealing with large numbers of netCDF files, or in long running processes. -Brewer colour palettes -====================== +Brewer Colour Palettes +---------------------- Iris includes a selection of carefully designed colour palettes produced by Cynthia Brewer. The :mod:`iris.palette` module registers the Brewer @@ -211,12 +202,12 @@ function. The recommended text for the Cynthia Brewer citation is provided by :data:`iris.plot.BREWER_CITE`. To include a reference in a journal article or report please refer to -`section 5 `_ +`section 5 `_ in the citation guidance provided by Cynthia Brewer. -Metadata attributes -=================== +Metadata Attributes +------------------- Iris now stores "source" and "history" metadata in Cube attributes. For example:: @@ -249,8 +240,8 @@ Where previously it would have appeared as:: cube.add_aux_coord(src_coord) -New loading functions -===================== +New Loading Functions +--------------------- The main functions for loading cubes are now: - :func:`iris.load()` @@ -272,8 +263,8 @@ now use the :func:`iris.load_cube()` and :func:`iris.load_cubes()` functions instead. -Cube projection -=============== +Cube Projection +--------------- Iris now has the ability to project a cube into a number of map projections. This functionality is provided by :func:`iris.analysis.cartography.project()`. @@ -310,7 +301,24 @@ preserved. This function currently assumes global data and will if necessary extrapolate beyond the geographical extent of the source cube. -Other changes -============= -* Cube summaries are now more readable when the scalar coordinates - contain bounds. +Incompatible Changes +==================== + +* The "source" and "history" metadata are now represented as Cube + attributes, where previously they used coordinates. + +* :meth:`iris.cube.Cube.coord_dims()` now returns a tuple instead of a list. + +* The ``iris.plot.gcm`` and ``iris.plot.map_setup`` functions are now removed. + See :ref:`whats-new-cartopy` for further details. + + +Deprecations +============ + +* The methods :meth:`iris.coords.Coord.cos()` and + :meth:`iris.coords.Coord.sin()` have been deprecated. + +* The :func:`iris.load_strict()` function has been deprecated. Code + should now use the :func:`iris.load_cube()` and + :func:`iris.load_cubes()` functions instead. diff --git a/docs/iris/src/whatsnew/1.1.rst b/docs/src/whatsnew/1.1.rst similarity index 87% rename from docs/iris/src/whatsnew/1.1.rst rename to docs/src/whatsnew/1.1.rst index 274ec65ff6..86f0bb16fa 100644 --- a/docs/iris/src/whatsnew/1.1.rst +++ b/docs/src/whatsnew/1.1.rst @@ -1,71 +1,59 @@ -What's new in Iris 1.1 -********************** +v1.1 (03 Jan 2013) +****************** -:Release: 1.1.0 -:Date: 7 Dec, 2012 - -This document explains the new/changed features of Iris in version 1.1. +This document explains the changes made to Iris for this release (:doc:`View all changes `.) + +Features +======== + With the release of Iris 1.1, we are introducing support for Mac OS X. Version 1.1 also sees the first batch of performance enhancements, with some notable improvements to netCDF/PP import. - -Iris 1.1 features -================= - -A summary of the main features added with version 1.1: - * Support for Mac OS X. + * GRIB1 import now supports time units of "3 hours". + * Fieldsfile import now supports unpacked and "CRAY" 32-bit packed data in 64-bit Fieldsfiles. + * PP file import now supports "CRAY" 32-bit packed data. 
+ * Various performance improvements, particularly for netCDF import, PP import, and constraints. + * GRIB2 export now supports level types of altitude and height (codes 102 and 103). + * iris.analysis.cartography.area_weights now supports non-standard dimension orders. + * PP file import now adds the "forecast_reference_time" for fields where LBTIM is 11, 12, 13, 31, or 32. + * PP file import now supports LBTIM values of 1, 2, and 3. + * Fieldsfile import now has some support for ancillary files. + * Coordinate categorisation functions added for day-of-year and user-defined seasons. + * GRIB2 import now has partial support for probability data defined with product template 4.9. -Bugs fixed ----------- -* PP export no longer attempts to set/overwrite the STASH code based on - the standard_name. -* Cell comparisons now work consistently, which fixes a bug where - bounded_cell > point_cell compares the point to the bounds but, - point_cell < bounded_cell compares the points. -* Fieldsfile import now correctly recognises pre v3.1 and post v5.2 - versions, which fixes a bug where the two were interchanged. -* iris.analysis.trajectory.interpolate now handles hybrid-height. - -Incompatible changes --------------------- -* N/A - -Deprecations ------------- -* N/A - -Coordinate categorisation -========================= +Coordinate Categorisation +------------------------- An :func:`~iris.coord_categorisation.add_day_of_year` categorisation function has been added to the existing suite in :mod:`iris.coord_categorisation`. -Custom seasons --------------- + +Custom Seasons +~~~~~~~~~~~~~~ The conventional seasonal categorisation functions have been complemented by two groups of functions which handle user-defined, @@ -97,3 +85,19 @@ The other custom season function is: This function adds a coordinate containing True/False values determined by membership of a single custom season. + + +Bugs Fixed +========== + +* PP export no longer attempts to set/overwrite the STASH code based on + the standard_name. + +* Cell comparisons now work consistently, which fixes a bug where + bounded_cell > point_cell compares the point to the bounds but, + point_cell < bounded_cell compares the points. + +* Fieldsfile import now correctly recognises pre v3.1 and post v5.2 + versions, which fixes a bug where the two were interchanged. + +* iris.analysis.trajectory.interpolate now handles hybrid-height. diff --git a/docs/src/whatsnew/1.10.rst b/docs/src/whatsnew/1.10.rst new file mode 100644 index 0000000000..92822087dd --- /dev/null +++ b/docs/src/whatsnew/1.10.rst @@ -0,0 +1,340 @@ +v1.10 (05 Sep 2016) +******************* + +This document explains the changes made to Iris for this release +(:doc:`View all changes `.) + + +Features +======== + +.. _iris_grib_added: + +* Support has now been added for the + `iris_grib `_ package, which + provides GRIB format support in an optional package, separate from Iris. + + * If ``iris_grib`` is available, it will always be used in place of the older + iris module :mod:`iris.fileformats.grib`. + + * The capabilities of ``iris_grib`` are essentially the same as the existing + :mod:`iris.fileformats.grib` when used with + ``iris.FUTURE.strict_grib_load=True``, with only small detail differences. + + * The old :mod:`iris.fileformats.grib` module is now deprecated and may + shortly be removed. + + * If you are already using the recommended :data:`iris.FUTURE` setting + ``iris.FUTURE.strict_grib_load=True`` this should not cause problems, as + the new package is all-but identical. 
+
+  * However, the option ``iris.FUTURE.strict_grib_load`` is itself now
+    deprecated, so you should remove code that sets it.
+
+  * If, however, your code is still using the older "non-strict" grib
+    loading, then you may need to make code changes.
+
+    * In particular, the ``field`` object passed to load callbacks is
+      different.
+      See :class:`iris.fileformats.grib.message.GribMessage` (the
+      ``iris_grib.message.GribMessage`` class is the same as this, for now).
+
+  * Please exercise your code with the new iris_grib module, and let us know of
+    any problems you uncover, such as files that will no longer load with the
+    new implementation.
+
+* :meth:`iris.experimental.regrid.PointInCell.regridder` now works across
+  coordinate systems, including non-latlon systems. Additionally, the
+  requirement that the source data X and Y coordinates be 2D has been removed.
+  NB: some aspects of this change are backwards incompatible.
+
+* Plotting non-Gregorian calendars is now supported. This adds
+  `nc_time_axis `_ as a dependency.
+
+* Promoting a scalar coordinate to a dimension coordinate with
+  :func:`iris.util.new_axis` no longer loads deferred data.
+
+* The parsing functionality for Cell Methods from netCDF files is available
+  as part of the :mod:`iris.fileformats.netcdf` module as
+  :func:`iris.fileformats.netcdf.parse_cell_methods`.
+
+* Support for the NameIII Version 2 file format has been added.
+
+* Loading netcdf data in Mercator and Stereographic projections now accepts
+  optional extra projection parameter attributes (``false_easting``,
+  ``false_northing`` and ``scale_factor_at_projection_origin``), if they match
+  the default values.
+
+  * NetCDF files which define a Mercator projection where the
+    ``false_easting``, ``false_northing`` and
+    ``scale_factor_at_projection_origin`` match the defaults will have the
+    projection loaded correctly. Otherwise, a warning will be issued for each
+    parameter that does not match the default and the projection will not be
+    loaded.
+
+  * NetCDF files which define a Stereographic projection where the
+    ``scale_factor_at_projection_origin`` is equal to 1.0 will have the
+    projection loaded correctly. Otherwise, a warning will be issued and the
+    projection will not be loaded.
+
+* The :mod:`iris.plot` routines :func:`~iris.plot.contour`,
+  :func:`~iris.plot.contourf`, :func:`~iris.plot.outline`,
+  :func:`~iris.plot.pcolor`, :func:`~iris.plot.pcolormesh` and
+  :func:`~iris.plot.points` now support plotting cubes with anonymous
+  dimensions by specifying the *numeric index* of the anonymous dimension
+  within the ``coords`` keyword argument.
+
+  Note that the axis of the anonymous dimension will be plotted in index space.
+
+* NetCDF loading and saving now supports Cubes that use the LambertConformal
+  coordinate system.
+
+* The experimental structured Fieldsfile loader
+  :func:`~iris.experimental.fieldsfile.load` has been extended to also load
+  structured PP files.
+
+  Structured loading is a streamlined operation, offering the benefit of a
+  significantly faster loading alternative to the more generic
+  :func:`iris.load` mechanism.
+
+  Note that structured loading is not an optimised wholesale replacement of
+  :func:`iris.load`. Structured loading is restricted to input containing
+  contiguously ordered fields for each phenomenon that repeat regularly over
+  the same vertical levels and times. For further details, see
+  :func:`~iris.experimental.fieldsfile.load`.
+
+* :mod:`iris.experimental.regrid_conservative` is now compatible with ESMPy v7.
+ +* Saving zonal (i.e. longitudinal) means to PP files now sets the '64s' bit in + LBPROC. + +* Loading of 'little-endian' PP files is now supported. + +* All appropriate :mod:`iris.plot` functions now handle an ``axes`` keyword, + allowing use of the object oriented matplotlib interface rather than pyplot. + +* The ability to pass file format object lists into the rules based load + pipeline, as used for GRIB, Fields Files and PP has been added. The + :func:`iris.fileformats.pp.load_pairs_from_fields` and + :func:`iris.fileformats.grib.load_pairs_from_fields` are provided to produce + cubes from such lists. These lists may have been filtered or altered using + the appropriate :mod:`iris.fileformats` modules. + +* Cubes can now have an 'hour' coordinate added with + :meth:`iris.coord_categorisation.add_hour`. + +* Time coordinates from PP fields with an lbcode of the form 3xx23 are now + correctly encoded with a 360-day calendar. + +* The loading from and saving to netCDF of CF cell_measure variables is + supported, along with their representation within a Cube as + :attr:`~iris.cube.Cube.cell_measures`. + +* Cubes with anonymous dimensions can now be concatenated. This can only occur + along a dimension that is not anonymous. + +* NetCDF saving of ``valid_range``, ``valid_min`` and ``valid_max`` cube + attributes is now allowed. + + +Bugs Fixed +========== + +* Altered Cell Methods to display coordinate's standard_name rather than + var_name where appropriate to avoid human confusion. + +* Saving multiple cubes with netCDF4 protected attributes should now work as + expected. + +* Concatenating cubes with singleton dimensions (dimensions of size one) now + works properly. + +* Fixed the ``grid_mapping_name`` and ``secant_latitudes`` handling for the + LambertConformal coordinate system. + +* Fixed bug in :func:`iris.analysis.cartography.project` where the output + projection coordinates didn't have units. + +* Attempting to use :meth:`iris.sample_data_path` to access a file that isn't + actually Iris sample data now raises a more descriptive error. A note about + the appropriate use of `sample_data_path` has also been added to the + documentation. + +* Fixed a bug where regridding or interpolation with the + :class:`~iris.analysis.Nearest` scheme returned floating-point results even + when the source data was integer typed. It now always returns the same type + as the source data. + +* Fixed a bug where regridding circular data would ignore any source masking. + This affected any regridding using the :class:`~iris.analysis.Linear` and + :class:`~iris.analysis.Nearest` schemes, and also + :func:`iris.analysis.interpolate.linear`. + +* The ``coord_name`` parameter to + :func:`~iris.fileformats.rules.scalar_cell_method` is now checked correctly. + +* LBPROC is set correctly when a cube containing the minimum of a variable is + saved to a PP file. The IA component of LBTIM is set correctly when saving + maximum or minimum values. + +* The performance of :meth:`iris.cube.Cube.extract` when a list of values is + given to an instance of :class:`iris.Constraint` has been improved + considerably. + +* Fixed a bug with :meth:`iris.cube.Cube.data` where an :class:`numpy.ndarray` + was not being returned for scalar cubes with lazy data. + +* When saving in netcdf format, the units of 'latitude' and 'longitude' + coordinates specified in 'degrees' are saved as 'degrees_north' and + 'degrees_east' respectively, as defined in the CF conventions for netCDF + files: sections 4.1 and 4.2. 
+ +* Fixed a bug with a class of pp files with lbyr == 0, where the date would + cause errors when converting to a datetime object (e.g. when printing a cube). + + When processing a pp field with lbtim = 2x, lbyr == lbyrd == 0 and + lbmon == lbmond, 'month' and 'month_number' coordinates are created instead + of 'time'. + +* Fixed a bug in :meth:`~iris.analysis.calculus.curl` where the sign of the + r-component for spherical coordinates was opposite to what was expected. + +* A bug that prevented cube printing in some cases has been fixed. + +* Fixed a bug where a deepcopy of a :class:`~iris.coords.DimCoord` would have + writeable ``points`` and ``bounds`` arrays. These arrays can now no longer be + modified in-place. + +* Concatenation no longer occurs when the auxiliary coordinates of the cubes do + not match. This check is not applied to AuxCoords that span the dimension the + concatenation is occurring along. This behaviour can be switched off by + setting the ``check_aux_coords`` kwarg in + :meth:`iris.cube.CubeList.concatenate` to False. + +* Fixed a bug in :meth:`iris.cube.Cube.subset` where an exception would be + thrown while trying to subset over a non-dimensional scalar coordinate. + + +Incompatible Changes +==================== + +* The source and target for + :meth:`iris.experimental.regrid.PointInCell.regridder` must now have defined + coordinate systems (i.e. not ``None``). Additionally, the source data X and Y + coordinates must have the same cube dimensions. + + +Deprecations +============ + +* Deprecated the :class:`iris.Future` option + ``iris.FUTURE.strict_grib_load``. + This only affected the module :mod:`iris.fileformats.grib`, which is itself + now deprecated. + Please see :ref:`iris_grib package `, above. + +* Deprecated the module :mod:`iris.fileformats.grib`. The new package + `iris_grib `_ replaces this + functionality, which will shortly be removed. + Please see :ref:`iris_grib package `, above. + +* The use of :data:`iris.config.SAMPLE_DATA_DIR` has been deprecated and + replaced by the now importable + `iris_sample_data `_ package. + +* Deprecated the module :mod:`iris.analysis.interpolate`. + This contains the following public items, all of which are now deprecated and + will be removed in a future release: + + * :func:`~iris.analysis.interpolate.linear` + * :func:`~iris.analysis.interpolate.regrid` + * :func:`~iris.analysis.interpolate.regrid_to_max_resolution` + * :func:`~iris.analysis.interpolate.nearest_neighbour_indices` + * :func:`~iris.analysis.interpolate.nearest_neighbour_data_value` + * :func:`~iris.analysis.interpolate.extract_nearest_neighbour` + * class :class:`~iris.analysis.interpolate.Linear1dExtrapolator`. + + Please use the replacement facilities individually noted in the module + documentation for :mod:`iris.analysis.interpolate` + +* The method :meth:`iris.cube.Cube.regridded` has been deprecated. + Please use :meth:`iris.cube.Cube.regrid` instead (see + :meth:`~iris.cube.Cube.regridded` for details). + +* Deprecated :data:`iris.fileformats.grib.hindcast_workaround` and + :class:`iris.fileformats.grib.GribWrapper`. The class + :class:`iris.fileformats.grib.message.GribMessage` provides alternative means + of working with GRIB message instances. + +* Deprecated the module :mod:`iris.fileformats.ff`. Please use the replacement + facilities in module :mod:`iris.fileformats.um` : + + * :func:`iris.fileformats.um.um_to_pp` replaces + :class:`iris.fileformats.ff.FF2PP`. 
+ * :func:`iris.fileformats.um.load_cubes` replaces + :func:`iris.fileformats.ff.load_cubes`. + * :func:`iris.fileformats.um.load_cubes_32bit_ieee` replaces + :func:`iris.fileformats.ff.load_cubes_32bit_ieee`. + + All other public components are generally deprecated and will be removed in a + future release. + +* The :func:`iris.fileformats.pp.as_pairs` and + :func:`iris.fileformats.grib.as_pairs` are deprecated. These are replaced + with :func:`iris.fileformats.pp.save_pairs_from_cube` and + :func:`iris.fileformats.grib.save_pairs_from_cube`. + +* ``iris.fileformats.pp_packing`` has been deprecated. Please install the + separate `mo_pack `_ package instead. + This provides the same functionality. + +* Deprecated logging functions (currently used only for rules logging): + :data:`iris.config.iris.config.RULE_LOG_DIR`, + :data:`iris.config.iris.config.RULE_LOG_IGNORE` and + :data:`iris.fileformats.rules.log`. + +* Deprecated all the remaining text rules mechanisms: + :class:`iris.fileformats.rules.DebugString`, + :class:`iris.fileformats.rules.CMAttribute`, + :class:`iris.fileformats.rules.CMCustomAttribute`, + :class:`iris.fileformats.rules.CoordAndDims`, + :class:`iris.fileformats.rules.Rule`, + :class:`iris.fileformats.rules.FunctionRule`, + :class:`iris.fileformats.rules.ProcedureRule`, + :class:`iris.fileformats.rules.RulesContainer` and + :func:`iris.fileformats.rules.calculate_forecast_period`. + +* Deprecated the custom pp save rules mechanism implemented by the functions + :func:`iris.fileformats.pp.add_save_rules` and + :func:`iris.fileformats.pp.reset_save_rules`. The functions + :func:`iris.fileformats.pp.as_fields`, :func:`iris.fileformats.pp.as_pairs` + and :func:`iris.fileformats.pp.save_fields` provide alternative means of + achieving the same ends. + + +Documentation +============= + +* It is now clear that repeated values will form a group under + :meth:`iris.cube.Cube.aggregated_by` even if they aren't consecutive. Hence, + the documentation for :mod:`iris.cube` has been changed to reflect this. + +* The documentation for :meth:`iris.analysis.calculus.curl` has been updated + for clarity. + +* False claims about :meth:`iris.fileformats.pp.save`, + :meth:`iris.fileformats.pp.as_pairs`, and + :meth:`iris.fileformats.pp.as_fields` being able to take instances of + :class:`iris.cube.CubeList` as inputs have been removed. + +* A new code example + :ref:`sphx_glr_generated_gallery_meteorology_plot_wind_speed.py`, + demonstrating the use of a quiver plot to display wind speeds over Lake + Victoria, has been added. + +* The docstring for :data:`iris.analysis.SUM` has been updated to explicitly + state that weights passed to it aren't normalised internally. + +* A note regarding the impossibility of partially collapsing multi-dimensional + coordinates has been added to the user guide. + diff --git a/docs/src/whatsnew/1.11.rst b/docs/src/whatsnew/1.11.rst new file mode 100644 index 0000000000..356e6ec85b --- /dev/null +++ b/docs/src/whatsnew/1.11.rst @@ -0,0 +1,40 @@ +v1.11 (29 Oct 2016) +********************* + +This document explains the changes made to Iris for this release +(:doc:`View all changes `.) + + +Features +======== + +* If available, display the ``STASH`` code instead of ``unknown / (unknown)`` + when printing cubes with no ``standard_name`` and no ``units``. + +* Support for saving to netCDF with data packing has been added. + +* The coordinate system :class:`iris.coord_systems.LambertAzimuthalEqualArea` + has been added with NetCDF saving support. 
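+
+As a brief illustration of the data packing support mentioned above, a minimal
+sketch, assuming the ``packing`` keyword of the current netCDF saver and an
+already-loaded ``cube``::
+
+    import iris
+
+    # Request 16-bit integer packing; a suitable scale_factor and
+    # add_offset are derived from the range of the data.
+    iris.save(cube, "packed.nc", packing="i2")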
+ +Bugs Fixed +========== + +* Fixed a floating point tolerance bug in + :func:`iris.experimental.regrid.regrid_area_weighted_rectilinear_src_and_grid` + for wrapped longitudes. + +* Allow :func:`iris.util.new_axis` to promote the nominated scalar coordinate + of a cube with a scalar masked constant data payload. + +* Fixed a bug where :func:`iris.util._is_circular` would erroneously return + false when coordinate values are decreasing. + +* When saving to NetCDF, the existing behaviour of writing string attributes + as ASCII has been maintained across known versions of netCDF4-python. + + +Documentation +============= + +* Fuller doc-string detail added to :func:`iris.analysis.cartography.unrotate_pole` and + :func:`iris.analysis.cartography.rotate_pole`. diff --git a/docs/iris/src/whatsnew/1.12.rst b/docs/src/whatsnew/1.12.rst similarity index 95% rename from docs/iris/src/whatsnew/1.12.rst rename to docs/src/whatsnew/1.12.rst index 59ea47d876..2bb7090dd2 100644 --- a/docs/iris/src/whatsnew/1.12.rst +++ b/docs/src/whatsnew/1.12.rst @@ -1,14 +1,13 @@ -What's New in Iris 1.12 -*********************** +v1.12 (31 Jan 2017) +********************* -:Release: 1.12 -:Date: 2017-01-30 - -This document explains the new/changed features of Iris in version 1.12 +This document explains the changes made to Iris for this release (:doc:`View all changes `.) -Iris 1.12 Features -================== + +Features +======== + .. _showcase: .. admonition:: Showcase Feature: New regridding schemes @@ -121,11 +120,13 @@ Iris 1.12 Features Deprecations ============ + * The module :mod:`iris.experimental.fieldsfile` has been deprecated, in favour of the new fast-loading mechanism provided by :meth:`iris.fileformats.um.structured_um_loading`. -Documentation Changes -===================== +Documentation +============= + * Corrected documentation of :class:`iris.analysis.AreaWeighted` scheme to make the usage scope clearer. diff --git a/docs/src/whatsnew/1.13.rst b/docs/src/whatsnew/1.13.rst new file mode 100644 index 0000000000..4a2ecd8dbe --- /dev/null +++ b/docs/src/whatsnew/1.13.rst @@ -0,0 +1,73 @@ +v1.13 (17 May 2017) +******************* + +This document explains the changes made to Iris for this release +(:doc:`View all changes `.) + + +Features +======== + +* Allow the reading of NAME trajectories stored by time instead of by particle + number. + +* An experimental link to python-stratify via :mod:`iris.experimental.stratify`. + +* Data arrays may be shared between cubes, and subsets of cubes, by using the + :meth:`iris.cube.share_data` flag. + + +Bug Fixes +========= + +* The bounds are now set correctly on the longitude coordinate if a zonal mean + diagnostic has been loaded from a PP file as per the CF Standard. + +* NetCDF loading will now determine whether there is a string-valued scalar + label, i.e. a character variable that only has one dimension (the length of + the string), and interpret this correctly. + +* A line plot of geographic coordinates (e.g. drawing a trajectory) wraps + around the edge of the map cleanly, rather than plotting a segment straight + across the map. + +* When saving to PP, lazy data is preserved when generating PP fields from + cubes so that a list of cubes can be saved to PP without excessive memory + requirements. + +* An error is now correctly raised if a user tries to perform an arithmetic + operation on two cubes with mismatching coordinates. 
Previously these cases + were caught by the add and subtract operators, and now it is also caught by + the multiply and divide operators. + +* Limited area Rotated Pole datasets where the data range is + ``0 <= lambda < 360``, for example as produced in New Zealand, are plotted + over a sensible map extent by default. + +* Removed the potential for a RuntimeWarning: overflow encountered in + ``int_scalars`` which was missed during collapsed calculations. This could + trip up unwary users of limited data types, such as int32 for very large + numbers (e.g. seconds since 1970). + +* The CF conventions state that certain ``formula_terms`` terms may be omitted + and assumed to be zero + (https://cfconventions.org/cf-conventions/v1.6.0/cf-conventions.html#dimensionless-v-coord) + so Iris now allows factories to be constructed with missing terms. + +* In the User Guide's contour plot example, clabel inline is set to be False + so that it renders correctly, avoiding spurious horizontal lines across + plots, although this does make labels a little harder to see. + +* The computation of area weights has been changed to a more numerically + stable form. The previous form converted latitude to colatitude and used + difference of cosines in the cell area computation. This formulation uses + latitude and difference of sines. The conversion from latitude to colatitude + at lower precision causes errors when computing the cell areas. + + +Testing +======= + +* Iris has adopted conda-forge to provide environments for continuous + integration testing. + diff --git a/docs/iris/src/whatsnew/1.2.rst b/docs/src/whatsnew/1.2.rst similarity index 86% rename from docs/iris/src/whatsnew/1.2.rst rename to docs/src/whatsnew/1.2.rst index 720ae73376..dce0b6dc04 100644 --- a/docs/iris/src/whatsnew/1.2.rst +++ b/docs/src/whatsnew/1.2.rst @@ -1,16 +1,12 @@ -What's new in Iris 1.2 -********************** +v1.2 (28 Feb 2013) +****************** -:Release: 1.2.0 -:Date: 7th March 2013 - -This document explains the new/changed features of Iris in version 1.2. +This document explains the changes made to Iris for this release (:doc:`View all changes `.) -Iris 1.2 features -================= -A summary of the main features added with version 1.2: +Features +======== * :meth:`iris.cube.Cube.convert_units()` and :meth:`iris.coords.Coord.convert_units()` have been added. This is @@ -18,6 +14,7 @@ A summary of the main features added with version 1.2: another. For example, to convert a cube in kelvin to celsius, one can now call cube.convert_units('celsius'). The operation is in-place and if the units are not convertible an exception will be raised. + * :attr:`iris.cube.Cube.var_name`, :attr:`iris.coords.Coord.var_name` and :attr:`iris.aux_factory.AuxCoordFactory.var_name` attributes have been added. This attribute represents the CF variable name of the object. It is populated @@ -25,42 +22,57 @@ A summary of the main features added with version 1.2: var_name keyword argument has also been added to the :meth:`iris.cube.Cube.coord()`, :meth:`iris.cube.Cube.coords()` and :meth:`iris.cube.Cube.aux_factory()` methods. + * :meth:`iris.coords.Coord.is_compatible()` has been added. This method is used to determine whether two coordinates are sufficiently alike to allow operations such as :meth:`iris.coords.Coord.intersect()` and :func:`iris.analysis.interpolate.regrid()` to take place. A corresponding method for cubes, :meth:`iris.cube.Cube.is_compatible()`, has also been added. 
+ * Printing a :class:`~iris.cube.Cube` is now more user friendly with regards to dates and time. All *time* and *forecast_reference_time* scalar coordinates now display human readable date/time information. + * The units of a :class:`~iris.cube.Cube` are now shown when it is printed. + * The area weights calculated by :func:`iris.analysis.cartography.area_weights` may now be normalised relative to the total grid area. -* Weights may now be passed to :meth:`iris.cube.Cube.rolling_window` aggregations, - thus allowing arbitrary digital filters to be applied to a :class:`~iris.cube.Cube`. -Bugs fixed ----------- +* Weights may now be passed to :meth:`iris.cube.Cube.rolling_window` + aggregations, thus allowing arbitrary digital filters to be applied to a + :class:`~iris.cube.Cube`. + + +Bugs Fixed +========== + * The GRIB hindcast interpretation of negative forecast times can be enabled via the :data:`iris.fileformats.grib.hindcast_workaround` flag. + * The NIMROD file loader has been extended to cope with orography vertical coordinates. -Incompatible changes --------------------- + +Incompatible Changes +==================== + * The deprecated :attr:`iris.cube.Cube.unit` and :attr:`iris.coords.Coord.unit` attributes have been removed. + Deprecations ------------- +============ + * The :meth:`iris.coords.Coord.unit_converted()` method has been deprecated. Users should make a copy of the coordinate using :meth:`iris.coords.Coord.copy()` and then call the :meth:`iris.coords.Coord.convert_units()` method of the new coordinate. + * With the addition of the var_name attribute the signatures of DimCoord and AuxCoord have changed. This should have no impact if you are providing parameters as keyword arguments, but it may cause issues if you are relying on the position/order of the arguments. + * Iteration over a :class:`~iris.cube.Cube` has been deprecated. Instead, users should use :meth:`iris.cube.Cube.slices`. diff --git a/docs/iris/src/whatsnew/1.3.rst b/docs/src/whatsnew/1.3.rst similarity index 82% rename from docs/iris/src/whatsnew/1.3.rst rename to docs/src/whatsnew/1.3.rst index dbea08ad03..1895711379 100644 --- a/docs/iris/src/whatsnew/1.3.rst +++ b/docs/src/whatsnew/1.3.rst @@ -1,69 +1,41 @@ -What's new in Iris 1.3 -********************** +v1.3 (27 Mar 2013) +****************** -:Release: 1.3.0 -:Date: 27 March 2013 - -This document explains the new/changed features of Iris in version 1.3. +This document explains the changes made to Iris for this release (:doc:`View all changes `.) -Iris 1.3 features -================= -A summary of the main features added with version 1.3: +Features +======== * Experimental support for :ref:`loading ABF/ABL files`. + * Support in :func:`iris.analysis.interpolate.linear` for longitude ranges other than [-180, 180]. + * Support for :ref:`customised CF profiles` on export to netCDF. + * The documentation now includes guidance on :ref:`how to cite Iris`. + * The ability to calculate the exponential of a Cube, via :func:`iris.analysis.maths.exp()`. + * Experimental support for :ref:`concatenating Cubes` along existing dimensions via :func:`iris.experimental.concatenate.concatenate()`. -Bugs fixed ----------- -* Printing a Cube now supports Unicode attribute values. -* PP export now sets LBMIN correctly. -* Converting between reference times now works correctly for - units with non-Gregorian calendars. -* Slicing a :class:`~iris.cube.CubeList` now returns a - :class:`~iris.cube.CubeList` instead of a normal list. 
- -Incompatible changes --------------------- -* N/A - -Deprecations ------------- -* The boolean methods/properties on the :class:`~iris.unit.Unit` class - have been updated to `is_...()` methods, in line with the project's - naming conventions. - - ====================================== =========================================== - Deprecated property/method New method - ====================================== =========================================== - :meth:`~iris.unit.Unit.convertible()` :meth:`~iris.unit.Unit.is_convertible()` - :attr:`~iris.unit.Unit.dimensionless` :meth:`~iris.unit.Unit.is_dimensionless()` - :attr:`~iris.unit.Unit.no_unit` :meth:`~iris.unit.Unit.is_no_unit()` - :attr:`~iris.unit.Unit.time_reference` :meth:`~iris.unit.Unit.is_time_reference()` - :attr:`~iris.unit.Unit.unknown` :meth:`~iris.unit.Unit.is_unknown()` - ====================================== =========================================== - .. _whats-new-abf: -Loading ABF/ABL files -===================== +Loading ABF/ABL Files +--------------------- Support for the ABF and ABL file formats (as -`defined `_ by the -climate and vegetation research group of Boston University), is +`defined `_ +by the climate and vegetation research group of Boston University), is currently provided under the "experimental" system. As such, ABF/ABL file detection is not automatically enabled. @@ -79,8 +51,8 @@ For example:: .. _whats-new-cf-profile: -Customised CF profiles -====================== +Customised CF Profiles +---------------------- Iris now provides hooks in the CF-netCDF export process to allow user-defined routines to check and/or modify the representation in the @@ -89,18 +61,21 @@ netCDF file. The following keys within the ``iris.site_configuration`` dictionary have been **reserved** as hooks to *external* user-defined CF profile functions: - * ``cf_profile`` injests a :class:`iris.cube.Cube` for analysis and returns a profile result - * ``cf_patch`` modifies the CF-netCDF file associated with export of the :class:`iris.cube.Cube` + * ``cf_profile`` ingests a :class:`iris.cube.Cube` for analysis and returns a + profile result + * ``cf_patch`` modifies the CF-netCDF file associated with export of the + :class:`iris.cube.Cube` -The ``iris.site_configuration`` dictionary should be configured via the ``iris/site_config.py`` file. +The ``iris.site_configuration`` dictionary should be configured via the +``iris/site_config.py`` file. For further implementation details see ``iris/fileformats/netcdf.py``. .. _whats-new-concat: -Cube concatenation -================== +Cube Concatenation +------------------ Iris now provides initial support for concatenating Cubes along one or more existing dimensions. Currently this will force the data to be @@ -126,3 +101,33 @@ combine these into a single Cube as follows:: As this is an experimental feature, your feedback is especially welcome. +Bugs Fixed +========== + +* Printing a Cube now supports Unicode attribute values. + +* PP export now sets LBMIN correctly. + +* Converting between reference times now works correctly for + units with non-Gregorian calendars. + +* Slicing a :class:`~iris.cube.CubeList` now returns a + :class:`~iris.cube.CubeList` instead of a normal list. + + +Deprecations +============ + +* The boolean methods/properties on the :class:`~iris.unit.Unit` class + have been updated to `is_...()` methods, in line with the project's + naming conventions. 
+
+   ======================================= ===========================================
+   Deprecated Property/Method              New Method
+   ======================================= ===========================================
+   :meth:`~iris.unit.Unit.convertible()`   :meth:`~iris.unit.Unit.is_convertible()`
+   :attr:`~iris.unit.Unit.dimensionless`   :meth:`~iris.unit.Unit.is_dimensionless()`
+   :attr:`~iris.unit.Unit.no_unit`         :meth:`~iris.unit.Unit.is_no_unit()`
+   :attr:`~iris.unit.Unit.time_reference`  :meth:`~iris.unit.Unit.is_time_reference()`
+   :attr:`~iris.unit.Unit.unknown`         :meth:`~iris.unit.Unit.is_unknown()`
+   ======================================= ===========================================
diff --git a/docs/src/whatsnew/1.4.rst b/docs/src/whatsnew/1.4.rst
new file mode 100644
index 0000000000..24a98488af
--- /dev/null
+++ b/docs/src/whatsnew/1.4.rst
@@ -0,0 +1,249 @@
+v1.4 (14 Jun 2013)
+******************
+
+This document explains the changes made to Iris for this release
+(:doc:`View all changes `.)
+
+
+Features
+========
+
+* Multiple cubes can now be exported to a NetCDF file.
+
+* Correct nearest-neighbour calculation with circular coords.
+
+* :ref:`Experimental regridding enhancements <exp-regrid>`.
+
+* :ref:`Iris-Pandas interoperability <iris-pandas>`.
+
+* NIMROD level type 12 (levels below ground) can now be loaded.
+
+* :ref:`Load cubes from the internet via OPeNDAP <load-opendap>`.
+
+* :ref:`GeoTiff export (experimental) <geotiff_export>`.
+
+* :ref:`Cube merge update <cube-merge-update>`.
+
+* :ref:`Unambiguous season year naming <season-year-name>`.
+
+* NIMROD files with multiple fields and period of interest can now be loaded.
+
+* Missing values are now handled when loading GRIB messages.
+
+* PP export rule to calculate forecast period.
+
+* :func:`~iris.cube.Cube.aggregated_by` now maintains array masking.
+
+* IEEE 32bit fieldsfiles can now be loaded.
+
+* NetCDF transverse mercator and climatology data can now be loaded.
+
+* Polar stereographic GRIB data can now be loaded.
+
+* :ref:`Cubes with no vertical coord can now be exported to GRIB <grib-novert>`.
+
+* :ref:`Simplified resource configuration <simple_cfg>`.
+
+* :ref:`Extended GRIB parameter translation <grib_params>`.
+
+* Added an optimisation for single-valued coordinate constraints.
+
+* :ref:`One dimensional linear interpolation fix <one-d-linear>`.
+
+* :ref:`Fix for iris.analysis.calculus.differentiate <calc-diff-fix>`.
+
+* Fixed pickling of cubes with 2D aux coords from NetCDF.
+
+* Fixed a bug which ignored the "coords" keyword for certain plots.
+
+* Use the latest release of Cartopy, v0.8.0.
+
+.. _OPeNDAP: https://www.opendap.org
+.. _exp-regrid:
+
+Experimental Regridding Enhancements
+------------------------------------
+
+Bilinear, area-weighted and area-conservative regridding functions are now
+available in :mod:`iris.experimental`. These functions support masked data and
+handle derived coordinates such as hybrid height. The final API is still in
+development.
+
+In the meantime:
+
+
+Bilinear Rectilinear Regridding
+-------------------------------
+
+:func:`~iris.experimental.regrid.regrid_bilinear_rectilinear_src_and_grid`
+can be used to regrid a cube onto a horizontal grid defined in a different
+coordinate system. The data values are calculated using bilinear interpolation.
+ +For example:: + + from iris.experimental.regrid import regrid_bilinear_rectilinear_src_and_grid + regridded_cube = regrid_bilinear_rectilinear_src_and_grid(source_cube, target_grid_cube) + + +Area-Weighted Regridding +------------------------ + +:func:`~iris.experimental.regrid.regrid_area_weighted_rectilinear_src_and_grid` +can be used to regrid a cube such that the data values of the resulting cube +are calculated using the area-weighted mean. + +For example:: + + from iris.experimental.regrid import regrid_area_weighted_rectilinear_src_and_grid as regrid_area_weighted + regridded_cube = regrid_area_weighted(source_cube, target_grid_cube) + + +Area-Conservative Regridding +---------------------------- + +:func:`~iris.experimental.regrid_conservative.regrid_conservative_via_esmpy` +can be used for area-conservative regridding between geographical coordinate +systems. This uses the ESMF library functions, via the ESMPy interface. + +For example:: + + from iris.experimental.regrid_conservative import regrid_conservative_via_esmpy + regridded_cube = regrid_conservative_via_esmpy(source_cube, target_grid_cube) + + +.. _iris-pandas: + +Iris-Pandas Interoperability +---------------------------- + +Conversion to and from Pandas Series_ and DataFrames_ is now available. +See :mod:`iris.pandas` for more details. + +.. _Series: https://pandas.pydata.org/pandas-docs/stable/reference/series.html +.. _DataFrames: https://pandas.pydata.org/pandas-docs/stable/reference/frame.html + + +.. _load-opendap: + +Load Cubes From the Internet via OPeNDAP +---------------------------------------- + +Cubes can now be loaded directly from the internet, via OPeNDAP_. + +For example:: + + cubes = iris.load("https://geoport.whoi.edu/thredds/dodsC/bathy/gom15") + + +.. _geotiff_export: + +GeoTiff Export +-------------- + +With this experimental feature, two dimensional cubes can now be exported to +GeoTiff files. + +For example:: + + from iris.experimental.raster import export_geotiff + export_geotiff(cube, filename) + +.. note:: + + This is a raw data export only and does not save Iris plots. + + +.. _cube-merge-update: + +Cube Merge Update +----------------- + +Cube merging now favours numerical coordinates over string coordinates +to describe a dimension, and :class:`~iris.coords.DimCoord` over +:class:`~iris.coords.AuxCoord`. These modifications prevent the error: +*"No functional relationship between separable and inseparable candidate +dimensions"*. + + +.. _season-year-name: + +Unambiguous Season Year Naming +------------------------------ + +The default names of categorisation coordinates are now less ambiguous. +For example, :func:`~iris.coord_categorisation.add_month_number` and +:func:`~iris.coord_categorisation.add_month_fullname` now create +"month_number" and "month_fullname" coordinates. + + +.. _grib-novert: + +Cubes With no Vertical Coord can now be Exported to GRIB +-------------------------------------------------------- + +Iris can now export cubes with no vertical coord to GRIB. +The solution is still under discussion: See :issue:`519`. + + +.. _simple_cfg: + +Simplified Resource Configuration +--------------------------------- + +A new configuration variable called :data:`iris.config.TEST_DATA_DIR` +has been added, replacing the previous combination of +:data:`iris.config.MASTER_DATA_REPOSITORY` and +:data:`iris.config.DATA_REPOSITORY`. This constant should be the path +to a directory containing the test data required by the unit tests. 
It can +be set by adding a ``test_data_dir`` entry to the ``Resources`` section of +``site.cfg``. See :mod:`iris.config` for more details. + + +.. _grib_params: + +Extended GRIB Parameter Translation +----------------------------------- + + - More GRIB2 params are recognised on input. + - Now translates some codes on GRIB2 output. + - Some GRIB2 params may load with a different standard_name. + + + +.. _one-d-linear: + +One dimensional Linear Interpolation Fix +---------------------------------------- + +:func:`~iris.analysis.interpolate.linear` can now extrapolate from a single +point assuming a gradient of zero. This prevents an issue when loading cross +sections with a hybrid height coordinate, on a staggered grid and only a single +orography field. + + +.. _calc-diff-fix: + +Fix for iris.analysis.calculus.differentiate +-------------------------------------------- + +A bug in :func:`~iris.analysis.calculus.differentiate` that had the potential +to cause the loss of coordinate metadata when calculating the curl or the +derivative of a cube has been fixed. + + +Incompatible Changes +==================== + +* As part of simplifying the mechanism for accessing test data, + :func:`iris.io.select_data_path`, :data:`iris.config.DATA_REPOSITORY`, + :data:`iris.config.MASTER_DATA_REPOSITORY` and + :data:`iris.config.RESOURCE_DIR` have been removed. + +Deprecations +============ + +* The *add_custom_season_** functions from :mod:`~iris.coord_categorisation` + have been deprecated in favour of adding their functionality to the + *add_season_** functions + + diff --git a/docs/iris/src/whatsnew/1.5.rst b/docs/src/whatsnew/1.5.rst similarity index 91% rename from docs/iris/src/whatsnew/1.5.rst rename to docs/src/whatsnew/1.5.rst index 7af1e40285..72bdbac480 100644 --- a/docs/iris/src/whatsnew/1.5.rst +++ b/docs/src/whatsnew/1.5.rst @@ -1,16 +1,16 @@ -What's new in Iris 1.5 -********************** +v1.5 (13 Sep 2013) +****************** -:Release: 1.5.0 -:Date: 12 September 2013 - -This document explains the new/changed features of Iris in version 1.5. +This document explains the changes made to Iris for this release (:doc:`View all changes `.) -Iris 1.5 features -================= + +Features +======== + * Scatter plots can now be produced using :func:`iris.plot.scatter` and :func:`iris.quickplot.scatter`. + * The functions :func:`iris.plot.plot` and :func:`iris.quickplot.plot` now take up to two arguments, which may be cubes or coordinates, allowing the user to have full control over what is plotted on each axis. The coords keyword @@ -25,7 +25,9 @@ Iris 1.5 features * :class:`iris.analysis.SUM` is now a weighted aggregator, allowing it to take a weights keyword argument. + * GRIB2 translations added for standard_name 'soil_temperature'. + * :meth:`iris.cube.Cube.slices` can now handle passing dimension index as well as the currently supported types (string, coordinate), in order to slice in cases where there is no coordinate associated with a dimension (a mix of @@ -48,6 +50,7 @@ Iris 1.5 features plt.show() * Support for UM ancillary files truncated with the UM utility ieee + * Complete support for Transverse Mercator with saving to NetCDF also. .. code-block:: python @@ -70,18 +73,26 @@ Iris 1.5 features .. image:: images/transverse_merc.png * Support for loading NAME files (gridded and trajectory data). + * Multi-dimensional coordinate support added for :func:`iris.analysis.cartography.cosine_latitude_weights` + * Added limited packaged GRIB support (bulletin headers). 
+ * In-place keyword added to :func:`iris.analysis.maths.divide` and :func:`iris.analysis.maths.multiply`. + * Performance gains for PP loading of the order of 40%. + * :mod:`iris.quickplot` now has a :func:`~iris.quickplot.show` function to provide convenient access to matplotlib.pyplot.show(). + * :meth:`iris.coords.DimCoord.from_regular` now implemented which creates a :class:`~iris.coords.DimCoord` with regularly spaced points, and optionally bounds. + * Iris can now cope with a missing bounds variable from NetCDF files. + * Added support for bool array indexing on a cube. .. code-block:: python @@ -95,73 +106,95 @@ Iris 1.5 features * Added support for loading fields defined on regular Gaussian grids from GRIB files. + * :func:`iris.analysis.interpolate.extract_nearest_neighbour` now works without needing to load the data (especially relevant to large datasets). + * When using plotting routines from :mod:`iris.plot` or :mod:`iris.quickplot`, the direction of vertical axes will be reversed if the corresponding coordinate has a "positive" attribute set to "down". - see: :ref:`Oceanography-atlantic_profiles` + see: :ref:`sphx_glr_generated_gallery_oceanography_plot_atlantic_profiles.py` * New PP stashcode translations added including 'dewpoint' and 'relative_humidity'. + * Added implied heights for several common PP STASH codes. + * GeoTIFF export capability enhanced for supporting various data types, coord systems and mapping 0 to 360 longitudes to the -180 to 180 range. -Bugs fixed ----------- +Bugs Fixed +========== + * NetCDF error handling on save has been extended to capture file path and permission errors. + * Shape of the Earth scale factors are now correctly interpreted by the GRIB loader. They were previously used as a multiplier for the given value but should have been used as a decimal shift. + * OSGB definition corrected. + * Transverse Mercator on load now accepts the following interchangeably due to inconsistencies in CF documentation: - * +scale_factor_at_central_meridian <-> scale_factor_at_projection_origin - * +longitude_of_central_meridian <-> longitude_of_projection_origin - (+recommended encoding) + + * +scale_factor_at_central_meridian <-> scale_factor_at_projection_origin + + * +longitude_of_central_meridian <-> longitude_of_projection_origin + (+recommended encoding) + * Ellipse description now maintained when converting GeogCS to cartopy. + * GeoTIFF export bug fixes. + * Polar axis now set to the North Pole, when a cube with no coordinate system is saved to the PP file-format. + * :meth:`iris.coords.DimCoord.from_coord` and :meth:`iris.coords.AuxCoord.from_coord` now correctly returns a copy of the source coordinate's coordinate system. + * Units part of the axis label is now omitted when the coordinate it represents is given as a time reference (:mod:`iris.quickplot`). + * CF dimension coordinate is now maintained in the resulting cube when a cube with CF dimension coordinate is being aggregated over. + * Units for Lambert conformal and polar stereographic coordinates now defined as meters. + * Various fieldsfile load bugs including failing to read the coordinates from the file have been fixed. + * Coding of maximum and minimum time-stats in GRIB2 saving has been fixed. -* Example code in section 4.1 of the userguide updated so it uses a sample + +* Example code in section 4.1 of the user guide updated so it uses a sample data file that exists. 
+
 * Zorder of contour lines drawn by :func:`~iris.plot.contourf` has been
   changed to address an issue of objects appearing in between line and filled
   contours.
+
 * Coord comparisons now function correctly when comparing to numpy scalars.
+
 * Cube loading constraints and :meth:`iris.cube.Cube.extract` correctly
   implement cell equality methods.

-Incompatible changes
---------------------
-* N/A
-
 Deprecations
-------------
+============
+
 * The coords keyword argument for :func:`iris.plot.plot` and
   :func:`iris.quickplot.plot` has been deprecated due to the new API which
   accepts multiple cubes or coordinates.
+
 * :meth:`iris.fileformats.pp.PPField.regular_points` and
   :meth:`iris.fileformats.pp.PPField.regular_bounds` have now been deprecated
   in favour of a new factory method :meth:`iris.coords.DimCoord.from_regular()`.
+
 * :func:`iris.fileformats.pp.add_load_rules` and
   :func:`iris.fileformats.grib.add_load_rules` are now deprecated.
diff --git a/docs/iris/src/whatsnew/1.6.rst b/docs/src/whatsnew/1.6.rst
similarity index 83%
rename from docs/iris/src/whatsnew/1.6.rst
rename to docs/src/whatsnew/1.6.rst
index 4b540c6cc9..4b179b67d6 100644
--- a/docs/iris/src/whatsnew/1.6.rst
+++ b/docs/src/whatsnew/1.6.rst
@@ -1,14 +1,12 @@
-What's new in Iris 1.6
-**********************
+v1.6 (26 Jan 2014)
+******************

-:Release: 1.6.1
-:Date: 18th February 2014
-
-This document explains the new/changed features of Iris in version 1.6.
+This document explains the changes made to Iris for this release
+(:doc:`View all changes `.)

-Iris 1.6 features
-=================
+
+Features
+========

 .. _showcase:

@@ -21,17 +19,17 @@ Iris 1.6 features
   reference coordinate will return *datetime-like* objects when invoked
   with :meth:`iris.coords.Coord.cell` or :meth:`iris.coords.Coord.cells`.

-   .. code-block:: python
+   .. code-block:: pycon

      >>> from iris.coords import DimCoord
      >>> iris.FUTURE.cell_datetime_objects = True
-      >>> coord = DimCoord([1, 2, 3], 'time', units='hours since epoch')
+      >>> coord = DimCoord([1, 2, 3], "time", units="hours since epoch")
      >>> print([str(cell) for cell in coord.cells()])
      ['1970-01-01 01:00:00', '1970-01-01 02:00:00', '1970-01-01 03:00:00']

-   Note that, either a :class:`datetime.datetime` or :class:`netcdftime.datetime`
-   object instance will be returned, depending on the calendar of the time
-   reference coordinate.
+   Note that either a :class:`datetime.datetime` or
+   :class:`netcdftime.datetime` object instance will be returned, depending on
+   the calendar of the time reference coordinate.

   This capability permits time constraints to be expressed more naturally
   when the cell represents a *datetime-like* object.

   .. code-block:: python

      # Ignore the 1st of January.
      iris.Constraint(time=lambda cell: (cell.point.month, cell.point.day) != (1, 1))

-   Note that, :class:`iris.Future` also supports a `context manager `_
-   which allows multiple sections of code to execute with different run-time behaviour.
+   Note that :class:`iris.Future` also supports a
+   `context manager `_
+   which allows multiple sections of code to execute with different run-time
+   behaviour.

-   .. code-block:: python
+   .. code-block:: pycon

      >>> print(iris.FUTURE)
      Future(cell_datetime_objects=False)
@@ -63,12 +63,12 @@ Iris 1.6 features
   :class:`datetime.datetime` or :class:`netcdftime.datetime`.

   The *year, month, day, hour, minute, second* and *microsecond* attributes of
-  a :class:`iris.time.PartialDateTime` object may be fully or partially specified
-  for any given comparison.
+ a :class:`iris.time.PartialDateTime` object may be fully or partially + specified for any given comparison. This is particularly useful for time based constraints, whilst enabling the - :data:`iris.FUTURE.cell_datetime_objects`, see :ref:`here ` for further - details on this new release feature. + :data:`iris.FUTURE.cell_datetime_objects`, see :ref:`here ` for + further details on this new release feature. .. code-block:: python @@ -85,84 +85,236 @@ Iris 1.6 features * GRIB loading supports latitude/longitude or Gaussian reduced grids for version 1 and version 2. + * :ref:`A new utility function to assist with caching`. + * :ref:`The RMS aggregator supports weights`. + * :ref:`A new experimental function to equalise cube attributes`. + * :ref:`Collapsing a cube provides a tolerance level for missing-data`. + * NAME loading supports vertical coordinates. + * UM land/sea mask de-compression for Fieldsfiles and PP files. + * Lateral boundary condition Fieldsfile support. + * Staggered grid support for Fieldsfiles extended to type 6 (Arakawa C grid with v at poles). + * Extend support for Fieldsfiles with grid codes 11, 26, 27, 28 and 29. + * :ref:`Promoting a scalar coordinate to new leading cube dimension`. + * Interpreting cell methods from NAME. + * GRIB2 export without forecast_period, enabling NAME to GRIB2. + * Loading height levels from GRIB2. + * :func:`iris.coord_categorisation.add_categorised_coord` now supports multi-dimensional coordinate categorisation. -* Fieldsfiles and PP support for loading and saving of air potential temperature. + +* Fieldsfiles and PP support for loading and saving of air potential + temperature. + * :func:`iris.experimental.regrid.regrid_weighted_curvilinear_to_rectilinear` regrids curvilinear point data to a target rectilinear grid using associated area weights. -* Extended capability of the NetCDF saver :meth:`iris.fileformats.netcdf.Saver.write` - for fine-tune control of a :mod:`netCDF4.Variable`. Also allows multiple dimensions - to be nominated as *unlimited*. + +* Extended capability of the NetCDF saver + :meth:`iris.fileformats.netcdf.Saver.write` for fine-tune control of a + :mod:`netCDF4.Variable`. Also allows multiple dimensions to be nominated as + *unlimited*. + * :ref:`A new PEAK aggregator providing spline interpolation`. + * A new utility function :func:`iris.util.broadcast_to_shape`. + * A new utility function :func:`iris.util.as_compatible_shape`. + * Iris tests can now be run on systems where directory write permissions - previously did not allow it. This is achieved by writing to the current working - directory in such cases. + previously did not allow it. This is achieved by writing to the current + working directory in such cases. + * Support for 365 day calendar PP fields. + * Added phenomenon translation between cf and grib2 for wind (from) direction. + * PP files now retain lbfc value on save, derived from the stash attribute. -Bugs fixed +.. _caching: + +A New Utility Function to Assist With Caching +--------------------------------------------- +To assist with management of caching results to file, the new utility +function :func:`iris.util.file_is_newer_than` may be used to easily determine whether +the modification time of a specified cache file is newer than one or more other files. + +Typically, the use of caching is a means to circumvent the cost of repeating time +consuming processing, or to reap the benefit of fast-loading a pickled cube. + +.. 
code-block:: python
+
+    # Determine whether to load from the cache or source.
+    if iris.util.file_is_newer_than(cache_file, source_file):
+        with open(cache_file, "rb") as fh:
+            cube = cPickle.load(fh)
+    else:
+        cube = iris.load_cube(source_file)
+
+    # Perhaps perform some intensive processing ...
+
+    # Create the cube cache.
+    with open(cache_file, "wb") as fh:
+        cPickle.dump(cube, fh)
+
+
+.. _rms:
+
+The RMS Aggregator Supports Weights
+-----------------------------------
+
+The :data:`iris.analysis.RMS` aggregator has been extended to allow the use of
+weights using the new keyword argument :data:`weights`.
+
+For example, an RMS weighted cube collapse is performed as follows:
+
+.. code-block:: python
+
+    from iris.analysis import RMS
+
+    collapsed_cube = cube.collapsed("height", RMS, weights=weights)
+
+
+.. _equalise:
+
+Equalise Cube Attributes
+------------------------
+
+To assist with :class:`iris.cube.Cube` merging, the new experimental in-place
+function :func:`iris.experimental.equalise_cubes.equalise_attributes` ensures
+that a sequence of cubes contains a common set of :data:`iris.cube.Cube.attributes`.
+
+This attempts to smooth the merging process by ensuring that all candidate cubes
+have the same attributes.
+
+
+.. _tolerance:
+
+Masking a Collapsed Result by Missing-Data Tolerance
+----------------------------------------------------
+
+The result from collapsing masked cube data may now be completely
+masked by providing a :data:`mdtol` missing-data tolerance keyword
+to :meth:`iris.cube.Cube.collapsed`.
+
+This tolerance provides a threshold that will **completely** mask the
+collapsed result whenever the fraction of data to missing-data is
+less than or equal to the provided tolerance.
+
+
+.. _promote:
+
+Promote a Scalar Coordinate
+---------------------------
+
+The new utility function :func:`iris.util.new_axis` creates a new cube with
+a new leading dimension of size unity. If a scalar coordinate is provided, then
+the scalar coordinate is promoted to be the dimension coordinate for the new
+leading dimension.
+
+Note that this function will load the data payload of the cube.
+
+
+.. _peak:
+
+A New PEAK Aggregator Providing Spline Interpolation
+----------------------------------------------------
+
+The new :data:`iris.analysis.PEAK` aggregator calculates the global peak
+value from a spline interpolation of the :class:`iris.cube.Cube` data payload
+along a nominated coordinate axis.
+
+For example, to calculate the peak time:
+
+.. code-block:: python
+
+    from iris.analysis import PEAK
+
+    collapsed_cube = cube.collapsed("time", PEAK)
+
+
+Bugs Fixed
+==========
+
+* :meth:`iris.cube.Cube.rolling_window` has been extended to support masked
+  arrays.
+
+* :meth:`iris.cube.Cube.collapsed` now handles string coordinates.
+
+* Default LBUSER(2) to -99 for Fieldsfile and PP saving.
+
+* :func:`iris.util.monotonic` returns the correct direction.
+
+* File loaders correctly parse filenames containing colons.
+
+* ABF loader now correctly loads the ABF data payload once.
+
+* Support for 1D array :data:`iris.cube.Cube.attributes`.
+
+* GRIB bounded level saving fix.
+
+* :func:`iris.analysis.cartography.project` now associates a coordinate system
+  with the resulting target cube, where applicable.
+
+* :func:`iris.util.array_equal` now correctly ignores any mask if present,
+  matching the behaviour of :func:`numpy.array_equal` except with string array
+  support.
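+
+  For example, a small sketch of the masked behaviour (the arrays shown are
+  illustrative only)::
+
+      import numpy as np
+      from iris.util import array_equal
+
+      masked = np.ma.masked_array([1, 2, 3], mask=[False, True, False])
+      # The mask is ignored, so the underlying data are compared.
+      print(array_equal(masked, np.array([1, 2, 3])))  # True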
+ * :func:`iris.analysis.interpolate.linear` now retains a mask in the resulting cube. + * :meth:`iris.coords.DimCoord.from_regular` now correctly returns a coordinate which will always be regular as indicated by :func:`~iris.util.is_regular`. + * :func:`iris.util.rolling_window` handling of masked arrays (degenerate masks) fixed. + * Exception no longer raised for any ellipsoid definition in nimrod loading. -Incompatible changes + +Incompatible Changes ==================== + * The experimental 'concatenate' function is now a method of a :class:`iris.cube.CubeList`, see :meth:`iris.cube.CubeList.concatenate`. The functionality is unchanged. + * :meth:`iris.cube.Cube.extract_by_trajectory()` has been removed. Instead, use :func:`iris.analysis.trajectory.interpolate()`. + * :func:`iris.load_strict()` has been removed. Instead, use :func:`iris.load_cube()` and :func:`iris.load_cubes()`. + * :meth:`iris.coords.Coord.cos()` and :meth:`iris.coords.Coord.sin()` have been removed. + * :meth:`iris.coords.Coord.unit_converted()` has been removed. Instead, make a copy of the coordinate using :meth:`iris.coords.Coord.copy()` and then call the :meth:`iris.coords.Coord.convert_units()` method of the new coordinate. + * Iteration over a :class:`~iris.cube.Cube` has been removed. Instead, use :meth:`iris.cube.Cube.slices()`. -* The following :class:`~iris.unit.Unit` deprecated methods/properties have been removed. + +* The following :class:`~iris.unit.Unit` deprecated methods/properties have + been removed. ====================================== =========================================== - Removed property/method New method + Removed Property/Method New Method ====================================== =========================================== :meth:`~iris.unit.Unit.convertible()` :meth:`~iris.unit.Unit.is_convertible()` :attr:`~iris.unit.Unit.dimensionless` :meth:`~iris.unit.Unit.is_dimensionless()` @@ -170,18 +322,22 @@ Incompatible changes :attr:`~iris.unit.Unit.time_reference` :meth:`~iris.unit.Unit.is_time_reference()` :attr:`~iris.unit.Unit.unknown` :meth:`~iris.unit.Unit.is_unknown()` ====================================== =========================================== + * As a result of deprecating :meth:`iris.cube.Cube.add_history` and removing the automatic appending of history by operations such as cube arithmetic, collapsing, and aggregating, the signatures of a number of functions within :mod:`iris.analysis.maths` have been modified along with that of - :class:`iris.analysis.Aggregator` and :class:`iris.analysis.WeightedAggregator`. + :class:`iris.analysis.Aggregator` and + :class:`iris.analysis.WeightedAggregator`. + * The experimental ABF and ABL functionality has now been promoted to core functionality in :mod:`iris.fileformats.abf`. + * The following :mod:`iris.coord_categorisation` deprecated functions have been removed. 
=============================================================== ======================================================= - Removed function New function + Removed Function New Function =============================================================== ======================================================= :func:`~iris.coord_categorisation.add_custom_season` :func:`~iris.coord_categorisation.add_season` :func:`~iris.coord_categorisation.add_custom_season_number` :func:`~iris.coord_categorisation.add_season_number` @@ -191,124 +347,35 @@ Incompatible changes :func:`~iris.coord_categorisation.add_weekday_shortname` :func:`~iris.coord_categorisation.add_weekday` :func:`~iris.coord_categorisation.add_season_month_initials` :func:`~iris.coord_categorisation.add_season` =============================================================== ======================================================= + * When a cube is loaded from PP or GRIB and it has both time and forecast period - coordinates, and the time coordinate has bounds, the forecast period coordinate - will now also have bounds. These bounds will be aligned with the bounds of the - time coordinate taking into account the forecast reference time. Also, - the forecast period point will now be aligned with the time point. + coordinates, and the time coordinate has bounds, the forecast period + coordinate will now also have bounds. These bounds will be aligned with the + bounds of the time coordinate taking into account the forecast reference + time. Also, the forecast period point will now be aligned with the time point. + Deprecations ============ + * :meth:`iris.cube.Cube.add_history` has been deprecated in favour of users modifying/creating the history metadata directly. This is because the automatic behaviour did not deliver a sufficiently complete, auditable history and often prevented the merging of cubes. + * :func:`iris.util.broadcast_weights` has been deprecated and replaced by the new utility function :func:`iris.util.broadcast_to_shape`. + * Callback mechanism `iris.run_callback` has had its deprecation of return values revoked. The callback can now return cube instances as well as inplace changes to the cube. + New Contributors ================ -Congratulations and thank you to `felicityguest `_, `jkettleb `_, -`kwilliams-mo `_ and `shoyer `_ who all made their first contribution +Congratulations and thank you to +`felicityguest `_, +`jkettleb `_, +`kwilliams-mo `_ and +`shoyer `_ who all made their first contribution to Iris! - - ----- - - -.. _caching: - -A new utility function to assist with caching ---------------------------------------------- -To assist with management of caching results to file, the new utility -function :func:`iris.util.file_is_newer_than` may be used to easily determine whether -the modification time of a specified cache file is newer than one or more other files. - -Typically, the use of caching is a means to circumvent the cost of repeating time -consuming processing, or to reap the benefit of fast-loading a pickled cube. - -.. code-block:: python - - # Determine whether to load from the cache or source. - if iris.util.file_is_newer(cache_file, source_file): - with open(cache_file, 'rb') as fh: - cube = cPickle.load(fh) - else: - cube = iris.load_cube(source_file) - - # Perhaps perform some intensive processing ... - - # Create the cube cache. - with open(cache_file, 'wb') as fh: - cPickle.dump(cube, fh) - - -.. 
_rms: - -The RMS aggregator supports weights -=================================== -The :data:`iris.analysis.RMS` aggregator has been extended to allow the use of -weights using the new keyword argument :data:`weights`. - -For example, an RMS weighted cube collapse is performed as follows: - -.. code-block:: python - - from iris.analysis import RMS - collapsed_cube = cube.collapsed('height', RMS, weights=weights) - - -.. _equalise: - -Equalise cube attributes -======================== -To assist with :class:`iris.cube.Cube` merging, the new experimental in-place -function :func:`iris.experimental.equalise_cubes.equalise_attributes` ensures -that a sequence of cubes contains a common set of :data:`iris.cube.Cube.attributes`. - -This attempts to smooth the merging process by ensuring that all candidate cubes -have the same attributes. - - -.. _tolerance: - -Masking a collapsed result by missing-data tolerance -==================================================== -The result from collapsing masked cube data may now be completely -masked by providing a :data:`mdtol` missing-data tolerance keyword -to :meth:`iris.cube.Cube.collapsed`. - -This tolerance provides a threshold that will **completely** mask the -collapsed result whenever the fraction of data to missing-data is -less than or equal to the provided tolerance. - - -.. _promote: - -Promote a scalar coordinate -=========================== -The new utility function :func:`iris.util.new_axis` creates a new cube with -a new leading dimension of size unity. If a scalar coordinate is provided, then -the scalar coordinate is promoted to be the dimension coordinate for the new -leading dimension. - -Note that, this function will load the data payload of the cube. - - -.. _peak: - -A new PEAK aggregator providing spline interpolation -==================================================== -The new :data:`iris.analysis.PEAK` aggregator calculates the global peak -value from a spline interpolation of the :class:`iris.cube.Cube` data payload -along a nominated coordinate axis. - -For example, to calculate the peak time: - -.. code-block:: python - - from iris.analysis import PEAK - collapsed_cube = cube.collapsed('time', PEAK) diff --git a/docs/src/whatsnew/1.7.rst b/docs/src/whatsnew/1.7.rst new file mode 100644 index 0000000000..4c3f3197dc --- /dev/null +++ b/docs/src/whatsnew/1.7.rst @@ -0,0 +1,334 @@ +v1.7 (04 Jul 2014) +****************** + +This document explains the changes made to Iris for this release +(:doc:`View all changes `.) + + +Features +======== + +.. _showcase: + +.. admonition:: Showcase: Iris is making use of Biggus + + Iris is now making extensive use of + `Biggus `_ for virtual arrays and lazy + array evaluation. In practice this means that analyses of cubes with data + bigger than the available system memory are now possible. + + Other than the improved functionality the changes are mostly + transparent; for example, before the introduction of biggus, MemoryErrors + were likely for very large datasets:: + + >>> result = extremely_large_cube.collapsed('time', iris.analysis.MEAN) + MemoryError + + Now, for supported operations, the evaluation is lazy (i.e. it doesn't take + place until the actual data is subsequently requested) and can handle data + larger than available system memory:: + + >>> result = extremely_large_cube.collapsed('time', iris.analysis.MEAN) + >>> print(type(result)) + + + Memory is still a limiting factor if ever the data is desired as a NumPy + array (e.g. 
via :data:`cube.data `), but additional + methods have been added to the Cube to support querying and subsequently + accessing the "lazy" data form (see :meth:`~iris.cube.Cube.has_lazy_data` + and :meth:`~iris.cube.Cube.lazy_data`). + +.. admonition:: Showcase: New interpolation and regridding API + + New interpolation and regridding interfaces have been added which simplify + and extend the existing functionality. + + The interfaces are exposed on the cube in the form of the + :meth:`~iris.cube.Cube.interpolate` and :meth:`~iris.cube.Cube.regrid` + methods. Conceptually the signatures of the methods are:: + + interpolated_cube = cube.interpolate(interpolation_points, interpolation_scheme) + + and:: + + regridded_cube = cube.regrid(target_grid_cube, regridding_scheme) + + Whilst not all schemes have been migrated to the new interface, + :class:`iris.analysis.Linear` defines both linear interpolation and + regridding, and :class:`iris.analysis.AreaWeighted` defines an area weighted + regridding scheme. + +.. admonition:: Showcase: Merge and concatenate reporting + + Merge reporting is designed as an aid to the merge processes. Should merging + a :class:`~iris.cube.CubeList` fail, merge reporting means that a + descriptive error will be raised that details the differences between the + cubes in the :class:`~iris.cube.CubeList` that prevented the merge from + being successful. + + A new :class:`~iris.cube.CubeList` method, called + :meth:`~iris.cube.CubeList.merge_cube`, has been introduced. Calling it on a + :class:`~iris.cube.CubeList` will result in a single merged + :class:`~iris.cube.Cube` being returned or an error message being raised + that describes why the merge process failed. + + The following example demonstrates the error message that describes a merge + failure caused by cubes having differing attributes:: + + >>> cube_list = iris.cube.CubeList((c1, c2)) + >>> cube_list.merge_cube() + Traceback (most recent call last): + ... + raise iris.exceptions.MergeError(msgs) + iris.exceptions.MergeError: failed to merge into a single cube. + cube.attributes keys differ: 'foo' + + The naming of this new method mirrors that of Iris load functions, where one + would always expect a :class:`~iris.cube.CubeList` from :func:`iris.load` + and a :class:`~iris.cube.Cube` from :func:`iris.load_cube`. + + Concatenate reporting is the equivalent process for concatenating a + :class:`~iris.cube.CubeList`. It is accessed through the method + :meth:`~iris.cube.CubeList.concatenate_cube`, which will return a single + concatenated cube or produce an error message that describes why the + concatenate process failed. + +.. admonition:: Showcase: Cube broadcasting + + When performing cube arithmetic, cubes now follow similar broadcasting rules + as NumPy arrays. + + However, the additional richness of Iris coordinate meta-data provides an + enhanced capability beyond the basic broadcasting behaviour of NumPy. + + This means that when performing cube arithmetic, the dimensionality and + shape of cubes no longer need to match. 
For example, if the dimensionality + of a cube is reduced by collapsing, then the result can be used to subtract + from the original cube to calculate an anomaly:: + + >>> time_mean = original_cube.collapsed('time', iris.analysis.MEAN) + >>> mean_anomaly = original_cube - time_mean + + Given both broadcasting **and** coordinate meta-data, Iris can now perform + arithmetic with cubes that have similar but not identical shape:: + + >>> similar_cube = original_cube.copy() + >>> similar_cube.transpose() + >>> zero_cube = original_cube - similar_cube + +* Merge reporting that raises a descriptive error if the merge process fails. + +* Linear interpolation and regridding now make use of SciPy's + RegularGridInterpolator for much faster linear interpolation. + +* NAME file loading now handles the "no time averaging" column and translates + height/altitude above ground/sea-level columns into appropriate coordinate + metadata. + +* The NetCDF saver has been extended to allow saving of cubes with hybrid + pressure auxiliary factories. + +* PP/FF loading supports LBLEV of 9999. + +* Extended GRIB1 loading to support data on hybrid pressure levels. + +* :func:`iris.coord_categorisation.add_day_of_year` can be used to add + categorised day of year coordinates based on time coordinates with + non-Gregorian calendars. + +* Support for loading data on reduced grids from GRIB files in raw form without + automatically interpolating to a regular grid. + +* The coordinate systems :class:`iris.coord_systems.Orthographic` and + :class:`iris.coord_systems.VerticalPerspective` (for imagery from + geostationary satellites) have been added. + +* Extended NetCDF loading to support the "ocean sigma over z" auxiliary + coordinate + factory. + +* Support added for loading CF-NetCDF data with bounds arrays that are missing a + vertex dimension. + +* :meth:`iris.cube.Cube.rolling_window` can now be used with string-based + :class:`iris.coords.AuxCoord` instances. + +* Loading of PP and FF files has been optimised through deferring creation of + PPField attributes. + +* Automatic association of a coordinate's CF formula terms variable with the + data variable associated with that coordinate. + +* PP loading translates cross-section height into a dimensional auxiliary + coordinate. + +* String auxiliary coordinates can now be plotted with the Iris + plotting wrappers. + +* :func:`iris.analysis.geometry.geometry_area_weights` now + allows for the calculation of normalized cell weights. + +* Many new translations between the CF spec and STASH codes or GRIB2 parameter + codes. + +* PP save rules add the data's UM Version to the attributes of the saved + file when appropriate. + +* NetCDF reference surface variable promotion available through the + :class:`iris.FUTURE` mechanism. + +* A speed improvement in calculation of + :func:`iris.analysis.geometry.geometry_area_weights`. + +* The mdtol keyword was added to area-weighted regridding to allow control of + the tolerance for missing data. For a further description of this concept, see + :class:`iris.analysis.AreaWeighted`. + +* Handling for patching of the CF conventions global attribute via a defined + cf_patch_conventions function. + +* Deferred GRIB data loading has been introduced for reduced memory consumption + when loading GRIB files. + +* Concatenate reporting that raises a descriptive error if the concatenation + process fails. + +* A speed improvement when loading PP or FF data and constraining on STASH code. 
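+
+  For example, a sketch of a STASH-constrained load (the filename and STASH
+  code are illustrative only)::
+
+      import iris
+
+      stash_constraint = iris.AttributeConstraint(STASH="m01s16i203")
+      cube = iris.load_cube("my_model_run.pp", stash_constraint)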
+
+
+Bugs Fixed
+==========
+
+* Data containing more than one reference cube for constructing hybrid height
+  coordinates can now be loaded.
+
+* Removed a cause of an increased margin of error when interpolating.
+
+* Changed floating-point precision used when wrapping points for interpolation.
+
+* Mappables that can be used to generate colorbars are now returned by Iris
+  plotting wrappers.
+
+* NetCDF load ignores over-specified formula terms on bounded dimensionless
+  vertical coordinates.
+
+* Auxiliary coordinate factory loading now correctly interprets formula term
+  variables for "atmosphere hybrid sigma pressure" coordinate data.
+
+* Corrected comparison of NumPy NaN values in cube merge process.
+
+* Fixes for :meth:`iris.cube.Cube.intersection` to correct the calculation of
+  the intersection of a cube with split bounds, the handling of circular
+  coordinates, the handling of monotonically descending bounded coordinates,
+  and the finding of a wrapped two-point result and longitude tolerances.
+
+* A bug affecting :meth:`iris.cube.Cube.extract` and
+  :meth:`iris.cube.CubeList.extract` that led to unexpected behaviour when
+  operating on scalar cubes has been fixed.
+
+* :meth:`~iris.cube.Cube.aggregated_by` may now be passed single-value
+  coordinates.
+
+* Making a copy of a :class:`iris.coords.DimCoord` no longer results in the
+  writeable flag on the copied points and bounds arrays being set to True.
+
+* Can now save to PP a cube that has vertical levels but no orography.
+
+* Fixed a bug causing surface altitude and surface pressure fields to not
+  appear in cubes loaded with a STASH constraint.
+
+* Fixed support for :class:`iris.fileformats.pp.STASH` objects in STASH
+  constraints.
+
+* A fix to avoid a problem where cube attribute names clash with
+  NetCDF reserved attribute names.
+
+* A fix to allow :meth:`iris.cube.CubeList.concatenate` to deal with descending
+  coordinate order.
+
+* Added the missing NetCDF attribute ``varname`` when constructing a new
+  :class:`iris.coords.AuxCoord`.
+
+* The datatype of time arrays converted with
+  :func:`iris.util.unify_time_units` is now preserved.
+
+
+v1.7.3 (16 Dec 2014)
+^^^^^^^^^^^^^^^^^^^^
+
+* Scalar dimension coordinates can now be concatenated with
+  :meth:`iris.cube.CubeList.concatenate`.
+
+* Arbitrary names can no longer be set
+  for elements of a :class:`iris.fileformats.pp.SplittableInt`.
+
+* Cubes that contain a pseudo-level coordinate can now be saved to PP.
+
+* Fixed a bug in the FieldsFile loader that prevented it always loading all
+  available fields.
+
+
+v1.7.4 (15 Apr 2015)
+^^^^^^^^^^^^^^^^^^^^
+
+* :meth:`Coord.guess_bounds` can now deal with circular coordinates.
+
+* :meth:`Coord.nearest_neighbour_index` can now work with descending bounds.
+
+* Passing ``weights`` to :meth:`Cube.rolling_window` no longer prevents other
+  keyword arguments from being passed to the aggregator.
+
+* Several minor fixes to allow use of Iris on Windows.
+
+* Made use of the new ``standard_parallels`` keyword in Cartopy's
+  LambertConformal projection (Cartopy v0.12). Older versions of Iris will not
+  be able to create LambertConformal coordinate systems with Cartopy >= 0.12.
+
+
+Incompatible Changes
+====================
+
+* Saving a cube with a STASH attribute to NetCDF now produces a variable
+  with an attribute of "um_stash_source" rather than "ukmo__um_stash_source".
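+
+  For example, a sketch of the new behaviour (the filename and attribute value
+  are illustrative only; ``cube`` is assumed to carry a ``STASH`` attribute)::
+
+      import iris
+
+      iris.save(cube, "air_temp.nc")
+      # The saved variable now carries an attribute named "um_stash_source"
+      # (previously "ukmo__um_stash_source") holding the STASH code string.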
+ +* Cubes saved to NetCDF with a coordinate system referencing a spherical + ellipsoid now result in the grid mapping variable containing only the + "earth_radius" attribute, rather than the "semi_major_axis" and + "semi_minor_axis". + +* Collapsing a cube over all of its dimensions now results in a scalar cube + rather than a 1d cube. + + +Deprecations +============ + +* :func:`iris.util.ensure_array` has been deprecated. + +* Deprecated the :func:`iris.fileformats.pp.reset_load_rules` and + :func:`iris.fileformats.grib.reset_load_rules` functions. + +* Matplotlib is no longer a core Iris dependency. + + +Documentation +============= + +* New sections on :ref:`cube broadcasting ` and + :doc:`regridding and interpolation ` + have been added to the :doc:`user guide `. + +* An example demonstrating custom log-scale colouring has been added. + See :ref:`sphx_glr_generated_gallery_general_plot_anomaly_log_colouring.py`. + +* An example demonstrating the creation of a custom + :class:`iris.analysis.Aggregator` has been added. + See :ref:`sphx_glr_generated_gallery_general_plot_custom_aggregation.py`. + +* An example of reprojecting data from 2D auxiliary spatial coordinates + (such as that from the ORCA grid) has been added. See + :ref:`sphx_glr_generated_gallery_oceanography_plot_orca_projection.py`. + +* A clarification of the behaviour of + :func:`iris.analysis.calculus.differentiate`. + +* A new Technical Papers section has been added to + the documentation along with the addition of a paper providing an + :ref:`overview of the load process for UM-like fileformats (e.g. PP and Fieldsfile) `. diff --git a/docs/src/whatsnew/1.8.rst b/docs/src/whatsnew/1.8.rst new file mode 100644 index 0000000000..dd2ca5e155 --- /dev/null +++ b/docs/src/whatsnew/1.8.rst @@ -0,0 +1,231 @@ +v1.8 (14 Apr 2015) +****************** + +This document explains the changes made to Iris for this release +(:doc:`View all changes `.) + + +Features +======== + +.. _showcase: + +.. admonition:: Showcase: Rotate winds + + Iris can now rotate and unrotate wind vector data by transforming the wind + vector data to another coordinate system. + + For example:: + + >>> from iris.analysis.cartography import rotate_winds + >>> u_cube = iris.load_cube('my_rotated_u_wind_cube.pp') + >>> v_cube = iris.load_cube('my_rotated_v_wind_cube.pp') + >>> target_cs = iris.coord_systems.GeogCS(6371229.0) + >>> u_prime, v_prime = rotate_winds(u_cube, v_cube, target_cs) + +.. admonition:: Showcase: Nearest-neighbour scheme + + A nearest-neighbour scheme for interpolation and regridding has been added + to Iris. This joins the existing :class:`~iris.analysis.Linear` and + :class:`~iris.analysis.AreaWeighted` interpolation and regridding schemes. + + For example:: + + >>> result = cube.interpolate(sample_points, iris.analysis.Nearest()) + >>> regridded_cube = cube.regrid(target_grid, iris.analysis.Nearest()) + +.. admonition:: Showcase: Slices over a coordinate + + You can slice over one or more dimensions of a cube using + :meth:`iris.cube.Cube.slices_over`. + This provides similar functionality to :meth:`~iris.cube.Cube.slices` + but with almost the opposite outcome. + + Using :meth:`~iris.cube.Cube.slices` to slice a cube on a selected + dimension returns all possible slices of the cube with the selected + dimension retaining its dimensionality. Using + :meth:`~iris.cube.Cube.slices_over` to slice a cube on a selected + dimension returns all possible slices of the cube over the selected + dimension. 
+ + To demonstrate this:: + + >>> cube = iris.load(iris.sample_data_path('colpex.pp'))[0] + >>> print(cube.summary(shorten=True)) + air_potential_temperature / (K) (time: 6; model_level_number: 10; grid_latitude: 83; grid_longitude: 83) + >>> my_slice = next(cube.slices('time')) + >>> my_slice_over = next(cube.slices_over('time')) + >>> print(my_slice.summary(shorten=True)) + air_potential_temperature / (K) (time: 6) + >>> print(my_slice_over.summary(shorten=True)) + air_potential_temperature / (K) (model_level_number: 10; grid_latitude: 83; grid_longitude: 83) + + +* :func:`iris.cube.CubeList.concatenate` now works with + `biggus `_ arrays and so + now supports concatenation of cubes with deferred data. + +* Improvements to NetCDF saving through using biggus: + + * A cube's lazy data payload will still be lazy after saving; the data will not + be loaded into memory by the save operation. + + * Cubes with data payloads larger than system memory can now be saved to NetCDF + through biggus streaming the data to disk. + +* :func:`iris.util.demote_dim_coord_to_aux_coord` and + :func:`iris.util.promote_aux_coord_to_dim_coord` + allow a coordinate to be easily demoted or promoted within a cube. + +* :func:`iris.util.squeeze` removes all length 1 dimensions from a cube, and + demotes any associated squeeze dimension :class:`~iris.coords.DimCoord` to be + a scalar coordinate. + +* :meth:`iris.cube.Cube.slices_over`, which returns an iterator of all + sub-cubes along a given coordinate or dimension index. + +* :meth:`iris.cube.Cube.interpolate` now accepts datetime.datetime and + netcdftime.datetime instances for date or time coordinates. + +* Many new and updated translations between CF spec and STASH codes or GRIB2 + parameter codes. + +* PP/FF loader creates a height coordinate at 1.5m or 10m for certain relevant + stash codes. + +* Lazy aggregator support for the + :class:`standard deviation ` aggregator has been added. + +* A speed improvement in calculation of + :func:`iris.analysis.cartography.area_weights`. + +* Experimental support for unstructured grids has been added with + :func:`iris.experimental.ugrid`. This has been implemented using + `UGRID `_. + +* :meth:`iris.cube.CubeList.extract_overlapping` supports extraction of cubes + over regions where common coordinates overlap, over multiple coordinates. + +* Warnings raised due to invalid units in loaded data have been suppressed. + +* Experimental low-level read and write access for FieldsFile variants is now + supported via :class:`iris.experimental.um.FieldsFileVariant`. + +* PP loader will return cubes for all fields prior to a field with a problematic + header before raising an exception. + +* NetCDF loader skips invalid global attributes, raising a warning rather than + raising an exception. + +* A warning is now raised rather than an exception when constructing an + :class:`~iris.aux_factory.AuxCoordFactory` fails. + +* Supported :class:`aux coordinate factories ` + have been extended to include: + + * ``ocean sigma coordinate``, + * ``ocean s coordinate``, + * ``ocean s coordinate, generic form 1``, and + * ``ocean s coordinate, generic form 2``. + +* :meth:`iris.cube.Cube.intersection` now supports taking a points-only + intersection. Any bounds on intersected coordinates are ignored but retained. + +* The FF loader's known handled grids now includes ``Grid 21``. + +* A :class:`nearest neighbour ` scheme is now provided + for :meth:`iris.cube.Cube.interpolate` and :meth:`iris.cube.Cube.regrid`. 
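+
+  For example, a sketch of nearest-neighbour interpolation at explicit sample
+  points (``cube`` is assumed; the point values are illustrative only)::
+
+      import iris.analysis
+
+      sample_points = [("latitude", 51.5), ("longitude", -0.1)]
+      result = cube.interpolate(sample_points, iris.analysis.Nearest())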
+
+* :func:`iris.analysis.cartography.rotate_winds` supports transformation of
+  wind vectors to a different coordinate system.
+
+* NumPy universal functions can now be applied to cubes using
+  :func:`iris.analysis.maths.apply_ufunc`.
+
+* Generic functions can be applied to :class:`~iris.cube.Cube` instances using
+  :class:`iris.analysis.maths.IFunc`.
+
+* The :class:`iris.analysis.Linear` scheme now supports regridding as well as
+  interpolation. This enables :meth:`iris.cube.Cube.regrid` to perform bilinear
+  regridding, which now replaces the experimental routine
+  "iris.experimental.regrid.regrid_bilinear_rectilinear_src_and_grid".
+
+
+Bugs Fixed
+==========
+
+* Fix in netCDF loader to correctly determine whether the longitude coordinate
+  (including scalar coordinates) is circular.
+
+* :meth:`iris.cube.Cube.intersection` now supports bounds that extend slightly
+  beyond 360 degrees.
+
+* Lateral Boundary Condition (LBC) type FieldFiles are now handled correctly by
+  the FF loader.
+
+* Making a copy of a scalar cube with no data now correctly copies the data
+  array.
+
+* Height coordinates in NAME trajectory output files have been changed to match
+  other NAME output file formats.
+
+* Fixed datatype when loading an ``integer_constants`` array from a FieldsFile.
+
+* FF/PP loader adds appropriate cell methods for ``lbtim.ib = 3`` intervals.
+
+* An exception is raised if the units of the latitude and longitude coordinates
+  of the cube passed into :func:`iris.analysis.cartography.area_weights` are not
+  convertible to radians.
+
+* GRIB1 loader now creates a time coordinate for a time range indicator of 2.
+
+* NetCDF loader now loads units that are empty strings as dimensionless.
+
+
+v1.8.1 (03 Jun 2015)
+--------------------
+
+* The PP loader now carefully handles floating point errors in date time
+  conversions to hours.
+
+* The handling of fill values for lazy data loaded from NetCDF files is
+  altered, such that the ``_FillValue`` set in the file is preserved through
+  lazy operations.
+
+* The risk that cube intersections could return incorrect results due to
+  floating point tolerances is reduced.
+
+* The new GRIB2 loading code is altered to enable the loading of various data
+  representation templates; the data value unpacking is handled by the GRIB API.
+
+* Saving cube collections to NetCDF, where multiple similar aux-factories exist
+  within the cubes, is now carefully handled such that extra file variables are
+  created where required in some cases.
+
+
+Deprecations
+============
+
+* The original GRIB loader has been deprecated and replaced with a new
+  template-based GRIB loader.
+
+* Deprecated default NetCDF save behaviour of assigning the outermost
+  dimension to be unlimited. Switch to the new behaviour with no auto
+  assignment by setting :data:`iris.FUTURE.netcdf_no_unlimited` to True.
+
+* The former experimental method
+  "iris.experimental.regrid.regrid_bilinear_rectilinear_src_and_grid" has been
+  removed, as :class:`iris.analysis.Linear` now includes this functionality.
+
+
+Documentation
+=============
+
+* A chapter on :doc:`merge and concatenate ` has
+  been added to the :doc:`user guide `.
+
+* A section on installing Iris using `conda `_ has
+  been added to the :doc:`install guide `.
+
+* Updates to the chapter on
+  :doc:`regridding and interpolation `
+  have been added to the :doc:`user guide `.
diff --git a/docs/src/whatsnew/1.9.rst b/docs/src/whatsnew/1.9.rst new file mode 100644 index 0000000000..9829d8ff3b --- /dev/null +++ b/docs/src/whatsnew/1.9.rst @@ -0,0 +1,201 @@ +v1.9 (10 Dec 2015) +****************** + +This document explains the changes made to Iris for this release +(:doc:`View all changes `.) + + +Features +======== + +* Support for running on Python 3.4 has been added to the whole code base. + Some features which depend on external libraries will not be available until + they also support Python 3, namely: + + * gribapi does not yet provide a Python 3 interface + +* Added the UM pseudo level type to the information made available in the + STASH_TRANS table in :mod:`iris.fileformats.um._ff_cross_references` + +* When reading "cell_methods" attributes from NetCDF files, allow optional + whitespace before the colon. This is not strictly in the CF spec, but is a + common occurrence. + +* Basic cube arithmetic (plus, minus, times, divide) now supports lazy + evaluation. + +* :meth:`iris.analysis.cartography.rotate_winds` can now operate much faster + on multi-layer (i.e. > 2-dimensional) cubes, as it calculates rotation + coefficients only once and reuses them for additional layers. + +* Linear regridding of a multi-layer (i.e. > 2-dimensional) cube is now much + faster, as it calculates transform coefficients just once and reuses them for + additional layers. + +* Ensemble statistics can now be saved to GRIB2, using Product Definition + Template 4.11. + +* Loading of NetCDF data with ocean vertical coordinates now returns a 'depth' + in addition to an 'eta' cube. This operates on specific defined + dimensionless coordinates : see CF spec version 1.6, Appendix D. + +* :func:`iris.analysis.stats.pearsonr` updates: + + * Cubes can now be different shapes, provided one is broadcastable to the + other. + * Accepts weights keyword for weighted correlations. + * Accepts mdtol keyword for missing data tolerance level. + * Accepts common_mask keyword for restricting calculation to unmasked pairs of + cells. + +* Added a new point-in-cell regridding scheme, + :class:`iris.experimental.regrid.PointInCell`. + +* Added :meth:`iris.analysis.WPERCENTILE` - a new weighted aggregator for + calculating percentiles. + +* Added cell-method translations for LBPROC=64 and 192 in UM files, encoding + 'zonal mean' and 'zonal+time mean'. + +* Support for loading GRIB2 messages defined on a Lambert conformal grid has + been added to the GRIB2 loader. + +* Data on potential-temperature (theta) levels can now be saved to GRIB2, with + a fixed surface type of 107. + +* Added several new helper functions for file-save customisation, + (see also : :doc:`Saving Iris Cubes `): + + * :meth:`iris.fileformats.grib.as_pairs` + * :meth:`iris.fileformats.grib.as_messages` + * :meth:`iris.fileformats.grib.save_messages` + * :meth:`iris.fileformats.pp.as_pairs` + * :meth:`iris.fileformats.pp.as_fields` + * :meth:`iris.fileformats.pp.save_fields` + +* Loading data from GRIB2 now supports most of the currently defined 'data + representation templates' : code numbers 0, 1, 2, 3, 4, 40, 41, 50, 51 and 61. + +* When a Fieldsfile is opened for update as a + :class:`iris.experimental.um.FieldsFileVariant`, unmodified packed data in + the file can now be retained in the original form. Previously it could only + be stored in an unpacked form. + +* When reading and writing NetCDF data, the CF 'flag' attributes, + "flag_masks", "flag_meanings" and "flag_values" are now preserved through + Iris load and save. 
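+
+  For example (a sketch; ``flags.nc`` is a hypothetical file whose data
+  variable carries the three CF flag attributes)::
+
+      >>> import iris
+      >>> cube = iris.load_cube('flags.nc')
+      >>> sorted(name for name in cube.attributes if name.startswith('flag_'))
+      ['flag_masks', 'flag_meanings', 'flag_values']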
+ +* `mo_pack `_ was added as an optional + dependency. + It is used to encode and decode data in WGDOS packed form. + +* The :meth:`iris.experimental.um.Field.get_data` method can now be used to + read Fieldsfile data after the original + :class:`iris.experimental.um.FieldsFileVariant` has been closed. + +Bugs Fixed +========== + +* Fixed a bug in :meth:`iris.unit.Unit.convert` + (and the equivalent in `cf_units `_) + so that it now converts data to the native endianness, without which udunits + could not read it correctly. + +* Fixed a bug with loading WGDOS packed data in :mod:`iris.experimental.um`, + which could occasionally crash with some data. + +* Non-numeric suffixes in the numpy version string are now ignored, as they + would otherwise crash some regridding routines. + +* Fixed a bug in :mod:`iris.fileformats.um_cf_map` where the standard name + for the STASH code m01s12i187 was incorrectly set, making it + inconsistent with the stated unit of measure, 'm s-1'. + A long_name of 'change_over_time_in_upward_air_velocity_due_to_advection' with + units of 'm s-1' is now used instead. + +* Fixed a bug in :meth:`iris.cube.Cube.intersection`. + When edge points were at (base + period), intersection would unnecessarily + wrap the data. + +* Fixed a bug in :mod:`iris.fileformats.pp`. + A previous release removed the ability to pass a partial constraint on the + STASH attribute; this ability has been restored. + +* :meth:`iris.plot.default_projection_extent` now correctly raises an exception + if a cube has X bounds but no Y bounds, or vice versa. Previously this check + never failed, as the test was wrong. + +* When loading NetCDF data, a "units" attribute containing unicode characters + is now transformed by backslash-replacement. Previously this caused a crash. + Note: unicode units are *not supported in the CF conventions*. + +* When saving to NetCDF, factory-derived auxiliary coordinates are now correctly + saved with different names when they are not identical. Previously, such + coordinates could be saved with the same name, leading to errors. + +* Fixed a bug in :meth:`iris.experimental.um.FieldsFileVariant.close`, + which now correctly allocates extra blocks for larger lookups when saving. + Previously, when larger files open for update were closed, they could be + written out with data overlapping the lookup table. + +* Fixed a bug in :class:`iris.aux_factory.OceanSigmaZFactory` + which sometimes caused crashes when fetching the points of an "ocean sigma z" + coordinate. + + +v1.9.1 (05 Jan 2016) +-------------------- + +* Fixed a unicode bug preventing standard names from being built cleanly when + installing in Python 3. + + +v1.9.2 (28 Jan 2016) +-------------------- + +* Added a warning about possible data loss when writing lazy data to a file + that is simultaneously open for reading. + +* Removed a warning about data payload loading from concatenate. + +* Updates to concatenate documentation. + +* Fixed a bug with a name change in the netcdf4-python package. + +* Fixed a bug building the documentation examples. + +* Fixed a bug in :meth:`iris.cube.Cube.coord_system` under Python 3, by + avoiding sorting classes directly. + +* Fixed a bug regarding an unsuccessful ``dot`` import. + + +Incompatible Changes +==================== + +* GRIB message/file reading and writing may not be available for Python 3 due + to GRIB API limitations. + + +Deprecations +============ + +* Deprecated :mod:`iris.unit`, with unit functionality provided by + `cf_units `_ instead.
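+
+  Migration is typically a one-line change; a minimal sketch::
+
+      >>> from cf_units import Unit  # previously: from iris.unit import Unit
+      >>> Unit('celsius').convert(0, 'K')
+      273.15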
+ +* When loading from NetCDF, a deprecation warning is emitted if there is + vertical coordinate information that *would* produce extra result cubes if + :data:`iris.FUTURE.netcdf_promote` were set, but it is *not* set. + +* Deprecated :class:`iris.aux_factory.LazyArray` + + +Documentation +============= + +* A chapter on :doc:`saving iris cubes ` has been + added to the :doc:`user guide `. + +* Added script and documentation for building a what's new page from + developer-submitted contributions. See + :doc:`Contributing a "What's New" entry `. diff --git a/docs/iris/src/whatsnew/2.0.rst b/docs/src/whatsnew/2.0.rst similarity index 96% rename from docs/iris/src/whatsnew/2.0.rst rename to docs/src/whatsnew/2.0.rst index 43d60a8539..1ee159c662 100644 --- a/docs/iris/src/whatsnew/2.0.rst +++ b/docs/src/whatsnew/2.0.rst @@ -1,16 +1,13 @@ -What's New in Iris 2.0.0 -************************ +v2.0 (14 Feb 2018) +****************** -:Release: 2.0.0rc1 -:Date: 2018-01-11 +This document explains the changes made to Iris for this release +(:doc:`View all changes `.) -This document explains the new/changed features of Iris in version 2.0.0 -(:doc:`View all changes `). +Features +======== - -Iris 2.0.0 Features -=================== .. _showcase: .. admonition:: Dask Integration @@ -39,7 +36,7 @@ Iris 2.0.0 Features * The *new* in-place arithmetic operators :data:`__iadd__`, :data:`__idiv__`, :data:`__imul__`, :data:`__isub__`, and :data:`__itruediv__` have been added to support :class:`~iris.cube.Cube` operations :data:`+=`, - :data:`/=`, :data:`*=`, and :data:`-=`. Note that, for **divison** + :data:`/=`, :data:`*=`, and :data:`-=`. Note that, for **division** *__future__.division* is always in effect. * Changes to the :class:`iris.coords.Coord`: @@ -63,7 +60,7 @@ Iris 2.0.0 Features respectively. -The :data:`iris.FUTURE` has arrived! +The :data:`iris.FUTURE` has Arrived! ------------------------------------ Throughout version 1 of Iris a set of toggles in @@ -209,8 +206,8 @@ Incompatible Changes printed as ``m.s-1``. -Deprecation removals --------------------- +Deprecation +=========== All deprecated functionality that was announced for removal in Iris 2.0 has been removed. In particular: @@ -289,8 +286,8 @@ been removed. In particular: removed from the :class:`iris.fileformats.rules.Loader` constructor. -Documentation Changes -===================== +Documentation +============= * A new UserGuide chapter on :doc:`Real and Lazy Data ` has been added, and referenced from key @@ -298,6 +295,6 @@ Documentation Changes .. _Biggus: https://biggus.readthedocs.io/en/latest/ -.. _Dask: http://dask.pydata.org/en/latest/ +.. _Dask: https://dask.pydata.org/en/latest/ .. _iris_grib: https://github.com/SciTools/iris-grib/ -.. _schedulers: http://dask.pydata.org/en/latest/scheduler-overview.html +.. _schedulers: https://dask.pydata.org/en/latest/scheduler-overview.html diff --git a/docs/iris/src/whatsnew/2.1.rst b/docs/src/whatsnew/2.1.rst similarity index 84% rename from docs/iris/src/whatsnew/2.1.rst rename to docs/src/whatsnew/2.1.rst index 00f7115431..3613bc0c23 100644 --- a/docs/iris/src/whatsnew/2.1.rst +++ b/docs/src/whatsnew/2.1.rst @@ -1,37 +1,14 @@ -What's New in Iris 2.1 -********************** +.. include:: ../common_links.inc -:Release: 2.1 -:Date: 2018-06-06 +v2.1 (06 Jun 2018) +****************** -This document explains the new/changed features of Iris in version 2.1 -(:doc:`older "What's New" release notes can be found here`.) 
+This document explains the changes made to Iris for this release +(:doc:`View all changes `.) -Iris 2.1 Dependency updates -=========================== - -* The `cf_units `_ dependency - was updated to cf_units ``v2.0``. - cf_units v2 is almost entirely backwards compatible with v1. - However the ability to preserve some aliased calendars has been removed. - For this reason, it is possible that NetCDF load of a variable with a - "standard" calendar will result in a saved NetCDF of a "gregorian" - calendar. -* Iris updated its time-handling functionality from the - `netcdf4-python `_ - ``netcdftime`` implementation to the standalone module - `cftime `_. - cftime is entirely compatible with netcdftime, but some issues may - occur where users are constructing their own datetime objects. - In this situation, simply replacing ``netcdftime.datetime`` with - ``cftime.datetime`` should be sufficient. -* Iris now requires version 2 of Matplotlib, and ``>=1.14`` of NumPy. - Full requirements can be seen in the `requirements `_ - directory of the Iris' the source. - -Iris 2.1 Features -================= +Features +======== * Added ``repr_html`` functionality to the :class:`~iris.cube.Cube` to provide a rich html representation of cubes in Jupyter notebooks. Existing functionality @@ -42,42 +19,83 @@ Iris 2.1 Features * Updated :func:`iris.cube.Cube.name` to return a STASH code if the cube has one and no other valid names are present. This is now consistent with the summary information from :func:`iris.cube.Cube.summary`. + * The partial collapse of multi-dimensional auxiliary coordinates is now supported. Collapsed bounds span the range of the collapsed dimension(s). + * Added new function :func:`iris.cube.CubeList.realise_data` to compute multiple lazy values in a single operation, avoiding repeated re-loading of data or re-calculation of expressions. + * The methods :meth:`iris.cube.Cube.convert_units` and :meth:`iris.coords.Coord.convert_units` no longer forcibly realise the cube data or coordinate points/bounds. The converted values are now lazy arrays if the originals were. + * Added :meth:`iris.analysis.trajectory.interpolate` that allows you to interpolate to find values along a trajectory. + * It is now possible to add an attribute of ``missing_value`` to a cube (:issue:`1588`). + * Iris can now represent data on the Albers Equal Area Projection, and the NetCDF loader and saver were updated to handle this. (:issue:`2943`) + * The :class:`~iris.coord_systems.Mercator` projection has been updated to accept the ``standard_parallel`` keyword argument (:pull:`3041`). + Bugs Fixed ========== * All var names being written to NetCDF are now CF compliant. Non-alphanumeric characters are replaced with '_', and var names now always have a leading letter (:pull:`2930`). + * A cube resulting from a regrid operation using the `iris.analysis.AreaWeighted` regridding scheme will now have the smallest floating point data type to which the source cube's data type can be safely converted using NumPy's type promotion rules. + * :mod:`iris.quickplot` labels now honour the axes being drawn to when using the ``axes`` keyword (:pull:`3010`). + Incompatible Changes ==================== + * The deprecated :mod:`iris.experimental.um` was removed. Please consider using `mule `_ as an alternative. + * This release of Iris contains a number of updated metadata translations.
- See [this changelist](https://github.com/SciTools/iris/commit/69597eb3d8501ff16ee3d56aef1f7b8f1c2bb316#diff-1680206bdc5cfaa83e14428f5ba0f848) + See this + `changelist `_ + for further information. + + +Internal +======== + +* The `cf_units `_ dependency + was updated to cf_units ``v2.0``. + cf_units v2 is almost entirely backwards compatible with v1. + However the ability to preserve some aliased calendars has been removed. + For this reason, it is possible that NetCDF load of a variable with a + "standard" calendar will result in a saved NetCDF of a "gregorian" + calendar. + +* Iris updated its time-handling functionality from the + `netcdf4-python`__ + ``netcdftime`` implementation to the standalone module + `cftime `_. + cftime is entirely compatible with netcdftime, but some issues may + occur where users are constructing their own datetime objects. + In this situation, simply replacing ``netcdftime.datetime`` with + ``cftime.datetime`` should be sufficient. + +__ `netCDF4`_ + +* Iris now requires version 2 of Matplotlib, and ``>=1.14`` of NumPy. + Full requirements can be seen in the `requirements`_ + directory of the Iris source. diff --git a/docs/iris/src/whatsnew/2.2.rst b/docs/src/whatsnew/2.2.rst similarity index 91% rename from docs/iris/src/whatsnew/2.2.rst rename to docs/src/whatsnew/2.2.rst index 1eff99ecb4..a1f48f962b 100644 --- a/docs/iris/src/whatsnew/2.2.rst +++ b/docs/src/whatsnew/2.2.rst @@ -1,17 +1,13 @@ -What's New in Iris 2.2 -************************ +v2.2 (11 Oct 2018) +****************** -:Release: 2.2.0 -:Date: +This document explains the changes made to Iris for this release +(:doc:`View all changes `.) -This document explains the new/changed features of Iris in the release -of version 2.2 -(:doc:`View all changes `). +Features +======== - -Iris 2.2 Features -=================== .. _showcase: .. admonition:: 2-Dimensional Coordinate Plotting @@ -70,18 +66,6 @@ Iris 2.2 Features a NaN-tolerant array comparison. -Iris 2.2 Dependency updates -============================= - -* Iris is now using the latest version release of dask (currently 0.19.3) - -* Proj4 has been temporarily pinned to version < 5 while problems with the - Mollweide projection are addressed. - -* Matplotlib has been pinned to version < 3 temporarily while we account for - its changes in all SciTools libraries. - - Bugs Fixed ========== @@ -93,7 +77,7 @@ Bugs Fixed bound data is actually masked. -Bugs fixed in v2.2.1 +v2.2.1 (28 May 2019) -------------------- * Iris can now correctly unpack a column of header objects when saving a @@ -108,9 +92,20 @@ Bugs fixed in v2.2.1 floating-point arithmetic. +Internal +======== + +* Iris is now using the latest release of dask (currently 0.19.3). + +* Proj4 has been temporarily pinned to version < 5 while problems with the + Mollweide projection are addressed. + +* Matplotlib has been pinned to version < 3 temporarily while we account for + its changes in all SciTools libraries. + -Documentation Changes -===================== +Documentation +============= * Iris' `INSTALL` document has been updated to include guidance for running tests. diff --git a/docs/src/whatsnew/2.3.rst b/docs/src/whatsnew/2.3.rst new file mode 100644 index 0000000000..bec45a6603 --- /dev/null +++ b/docs/src/whatsnew/2.3.rst @@ -0,0 +1,259 @@ +v2.3 (19 Dec 2019) +****************** + +This document explains the changes made to Iris for this release +(:doc:`View all changes `.) + + +Features +======== + +.. _showcase: + +..
admonition:: Support for CF 1.7 + + We have introduced several changes that contribute to Iris's support for + the CF Conventions, including some CF 1.7 additions. We are now able to + support: + + * :ref:`Climatological Coordinates` + * :ref:`Standard name modifiers` + * :ref:`Geostationary projection` + + You can read more about each of these below. + + Additionally, the conventions attribute, added by Iris when saving to + NetCDF, has been updated to ``CF-1.7``, accordingly. + +.. _climatological: +.. admonition:: Climatological Coordinate Support + + Iris can now load, store and save `NetCDF climatological coordinates + `_. Any cube time + coordinate can be marked as a climatological time axis using the boolean + property: ``climatological``. The climatological bounds are stored in the + coordinate's ``bounds`` property. + + When an Iris climatological coordinate is saved in NetCDF, the NetCDF + coordinate variable will be given a 'climatology' attribute, and the + contents of the + coordinate's ``bounds`` property are written to a NetCDF boundary variable + called '_bounds'. These are in place of a standard + 'bounds' attribute and accompanying boundary variable. See below + for an `example adapted from CF conventions `_: + + .. code-block:: none + + dimensions: + time=4; + bnds=2; + variables: + float temperature(time,lat,lon); + temperature:long_name="surface air temperature"; + temperature:cell_methods="time: minimum within years time: mean over years"; + temperature:units="K"; + double time(time); + time:climatology="time_climatology"; + time:units="days since 1960-1-1"; + double time_climatology(time,bnds); + data: // time coordinates translated to date/time format + time="1960-4-16", "1960-7-16", "1960-10-16", "1961-1-16" ; + time_climatology="1960-3-1", "1990-6-1", + "1960-6-1", "1990-9-1", + "1960-9-1", "1990-12-1", + "1960-12-1", "1991-3-1" ; + + If a climatological time axis is detected when loading NetCDF - + indicated by the format described above - the ``climatological`` property + of the Iris coordinate will be set to ``True``. + +.. admonition:: New Chunking Strategy + + Iris now makes better choices of Dask chunk sizes when loading from NetCDF + files: If a file variable has small, specified chunks, Iris will now choose + Dask chunks which are a multiple of these up to a default target size. + + This is particularly relevant to files with an unlimited dimension, which + previously could produce a large number of small chunks. This had an adverse + effect on performance. + + In addition, Iris now takes its default chunk size from the default configured + in Dask itself, i.e. ``dask.config.get('array.chunk-size')``. + +.. admonition:: Lazy Statistics + + Several statistical operations can now be done lazily, taking advantage of the + performance improvements offered by Dask: + + * :meth:`~iris.cube.Cube.aggregated_by` + * :class:`~iris.analysis.RMS` (more detail below) + * :class:`~iris.analysis.MEAN` + +---- + +.. _geostationary: +.. _standard_name: +.. _conventions_1.7: + +* Cube data equality testing (and hence cube equality) now uses a more + relaxed + tolerance : This means that some cubes may now test 'equal' that previously + did not. + Previously, Iris compared cube data arrays using + ``abs(a - b) < 1.e-8`` + + We now apply the default operation of :func:`numpy.allclose` instead, + which is equivalent to + ``abs(a - b) < (1.e-8 + 1.e-5 * b)`` + +* Added support to render HTML for :class:`~iris.cube.CubeList` in Jupyter + Notebooks and JupyterLab. 
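+
+  A minimal sketch (``my_data.nc`` is a hypothetical file): simply evaluating
+  a :class:`~iris.cube.CubeList` as the final expression of a notebook cell
+  now renders the rich HTML view::
+
+      >>> import iris
+      >>> cubes = iris.load('my_data.nc')
+      >>> cubes  # rendered as an HTML table in Jupyter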
+ +* Loading CellMeasures with integer values is now supported. + +* New coordinate system: :class:`iris.coord_systems.Geostationary`, + including load and save support, based on the `CF Geostationary projection + definition `_. + +* :class:`iris.coord_systems.VerticalPerspective` can now be saved to and + loaded from NetCDF files. + +* :class:`iris.experimental.regrid.PointInCell` moved to + :class:`iris.analysis.PointInCell` to make this regridding scheme public + +* Iris now supports standard name modifiers. See + `Appendix C, Standard Name Modifiers `_ + for more information. + +* :meth:`iris.cube.Cube.remove_cell_measure` now also allows removal of a cell + measure by its name (previously only accepted a CellMeasure object). + +* The :data:`iris.analysis.RMS` aggregator now supports a lazy calculation. + However, the "weights" keyword is not currently supported by this, so a + *weighted* calculation will still return a realised result, *and* force + realisation of the original cube data. + +* Iris now supports NetCDF Climate and Forecast (CF) Metadata Conventions 1.7 + (see `CF 1.7 Conventions Document `_ for more information) + +* Updated standard name support to + `CF standard name table version 70, 2019-12-10 `_ + +* Updated UM STASH translations to + `metarelate/metOcean commit 448f2ef, 2019-11-29 `_ + + +Bugs Fixed +========== + +* Cube equality of boolean data is now handled correctly. + +* Fixed a bug where cell measures were incorrect after a cube + :meth:`~iris.cube.Cube.transpose` operation. Previously, this resulted in + cell-measures that were no longer correctly mapped to the cube dimensions. + +* The :class:`~iris.coords.AuxCoord` disregarded masked points and bounds, as + did the :class:`~iris.coords.DimCoord`. Fix permits an + :class:`~iris.coords.AuxCoord` to contain masked points/bounds, and a + TypeError exception is now raised when attempting to create or set the + points/bounds of a :class:`~iris.coords.DimCoord` with arrays with missing + points. + +* :class:`iris.coord_systems.VerticalPerspective` coordinate system now uses + the `CF Vertical perspective definition `_; had been + erroneously using Geostationary. + +* :class:`~iris.coords.CellMethod` will now only use valid + `NetCDF name tokens`_ to reference the coordinates involved in the + statistical operation. + +* The following var_name properties will now only allow valid + `NetCDF name tokens`_ + to reference the said NetCDF variable name. Note that names with a leading + underscore are not permitted. + +.. _NetCDF name tokens: https://www.unidata.ucar.edu/software/netcdf/documentation/NUG/netcdf_data_set_components.html#object_name + + * :attr:`iris.aux_factory.AuxCoordFactory.var_name` + * :attr:`iris.coords.CellMeasure.var_name` + * :attr:`iris.coords.Coord.var_name` + * :attr:`iris.coords.AuxCoord.var_name` + * :attr:`iris.cube.Cube.var_name` + +* Rendering a cube in Jupyter will no longer crash for a cube with + attributes containing ``\n``. + +* NetCDF variables which reference themselves in their ``cell_measures`` + attribute can now be read. + +* :func:`~iris.plot.quiver` now handles circular coordinates. + +* The names of cubes loaded from abf/abl files have been corrected. + +* Fixed a bug in UM file loading, where any landsea-mask-compressed fields + (i.e. with LBPACK=x2x) would cause an error later, when realising the data. + +* :meth:`iris.cube.Cube.collapsed` now handles partial collapsing of + multidimensional coordinates that have bounds. 
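+
+  For example (a hedged sketch; ``cube`` is assumed to carry a 2-D, bounded
+  ``latitude`` auxiliary coordinate spanning its two horizontal dimensions,
+  so collapsing just one of them is a partial collapse)::
+
+      >>> import iris.analysis
+      >>> partial = cube.collapsed('grid_longitude', iris.analysis.MEAN)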
+ +* Fixed a bug in the :data:`~iris.analysis.PROPORTION` aggregator, where cube + data in the form of a masked array with ``array.mask=False`` would cause an + error, but possibly only later when the values are actually realised. + (Note: since netCDF4 version 1.4.0, this is now a common form for data + loaded from netCDF files.) + +* Fixed a bug where plotting a cube with a + :class:`iris.coord_systems.LambertConformal` coordinate system would result + in an error. This would happen if the coordinate system was defined with one + standard parallel, rather than two. + In these cases, a call to + :meth:`~iris.coord_systems.LambertConformal.as_cartopy_crs` would fail. + +* :meth:`iris.cube.Cube.aggregated_by` now gives correct values in points and + bounds when handling multidimensional coordinates. + +* Fixed a bug in the :meth:`iris.cube.Cube.collapsed` operation, which caused + the unexpected realisation of any attached auxiliary coordinates that were + *bounded*. It now correctly produces a lazy result and does not realise + the original attached AuxCoords. + + +Internal +======== + +* Iris now supports Proj4 up to version 5, but not yet 6 or beyond, pending + `fixes to some cartopy tests `_. + +* Iris now requires Dask >= 1.2 to allow for improved coordinate equality + checks. + + +Documentation +============= + +* Adopted a + `new colour logo for Iris `_ + +* Added a gallery example showing how to concatenate NEMO ocean model data, + see :ref:`sphx_glr_generated_gallery_oceanography_plot_load_nemo.py`. + +* Added an example for loading Iris cubes for :ref:`using-time-constraints` + in the user guide, demonstrating how to load data within a specified date + range. + +* Added notes to the :func:`iris.load` documentation, and the user guide + :ref:`loading_iris_cubes` chapter, emphasizing that the *order* of the cubes + returned by an iris load operation is effectively random and unstable, and + should not be relied on. + +* Fixed references in the documentation of + :func:`iris.util.find_discontiguities` to a non-existent + "mask_discontiguities" routine: these now refer to + :func:`~iris.util.mask_cube`. + diff --git a/docs/src/whatsnew/2.4.rst b/docs/src/whatsnew/2.4.rst new file mode 100644 index 0000000000..0e271389b5 --- /dev/null +++ b/docs/src/whatsnew/2.4.rst @@ -0,0 +1,63 @@ +v2.4 (20 Feb 2020) +****************** + +This document explains the changes made to Iris for this release +(:doc:`View all changes `.) + + +Features +======== + +.. admonition:: Last Python 2 version of Iris + + Iris 2.4 is a final extra release of Iris 2, which back-ports specific + desired features from Iris 3 (not yet released). + + The purpose of this is both to support early adoption of certain newer + features, and to provide a final release for Python 2. + + The next release of Iris will be version 3.0: a major-version release which + introduces breaking API and behavioural changes, and only supports Python 3. + +* :class:`iris.coord_systems.Geostationary` can now accept creation arguments of + `false_easting=None` or `false_northing=None`, equivalent to values of 0. + Previously these kwargs could be omitted, but could not be set to `None`. + This also enables loading of netcdf data on a Geostationary grid, where + either of these keys is not present as a grid-mapping variable + property: previously, loading any such data caused an exception. + +* The area weights used when performing area weighted regridding with + :class:`iris.analysis.AreaWeighted` are now cached.
This allows a + significant speed up when regridding multiple similar cubes, by repeatedly + re-using a single :func:`iris.analysis.AreaWeighted.regridder` object + which you created first. + +* Name constraint matching against cubes during loading or extracting has been + relaxed from strictly matching against the :meth:`~iris.cube.Cube.name`, to + matching against either the ``standard_name``, ``long_name``, NetCDF + ``var_name``, or ``STASH`` attributes metadata of a cube. + +* Cubes and coordinates now have a new ``names`` property that contains a tuple + of the ``standard_name``, ``long_name``, NetCDF ``var_name``, and ``STASH`` + attributes metadata. + +* The :class:`~iris.NameConstraint` provides richer name constraint matching + when loading or extracting against cubes, by supporting a constraint against + any combination of ``standard_name``, ``long_name``, NetCDF ``var_name`` and + ``STASH`` from the attributes dictionary of a :class:`~iris.cube.Cube`. + + +Bugs Fixed +========== + +* Fixed a problem which was causing file loads to fetch *all* field data + whenever UM files (PP or Fieldsfiles) were loaded. + With large source files, initial file loads were slow, with large memory usage + before any cube data was even fetched, and large enough files would cause a + crash. The problem occurred only with Dask versions >= 2.0. + + +Internal +======== + +* Iris is now able to use the latest version of matplotlib. diff --git a/docs/src/whatsnew/3.0.rst b/docs/src/whatsnew/3.0.rst new file mode 100644 index 0000000000..4107ae5d2b --- /dev/null +++ b/docs/src/whatsnew/3.0.rst @@ -0,0 +1,623 @@ +.. include:: ../common_links.inc + +v3.0 (25 Jan 2021) +****************** + +This document explains the changes made to Iris for this release +(:doc:`View all changes `.) + +.. dropdown:: v3.0.0 Release Highlights + :color: primary + :icon: info + :animate: fade-in + :open: + + The highlights for this major release of Iris include: + + * We've finally dropped support for ``Python 2``, so welcome to ``Iris 3`` + and ``Python 3``! + * We've extended our coverage of the `CF Conventions and Metadata`_ by + introducing support for `CF Ancillary Data`_ and `Quality Flags`_, + * Lazy regridding is now available for several regridding schemes, + * Managing and manipulating metadata within Iris is now easier and more + consistent thanks to the introduction of a new common metadata API, + * :ref:`Cube arithmetic ` has been significantly improved with + regards to extended broadcasting, auto-transposition and a more lenient + behaviour towards handling metadata and coordinates, + * Our :ref:`documentation ` has been refreshed, + restructured, revitalised and rehosted on `readthedocs`_, + * It's now easier than ever to :ref:`install Iris ` + as a user or a developer, and the newly revamped developers guide walks + you through how you can :ref:`get involved ` + and contribute to Iris, + * Also, this is a major release of Iris, so please be aware of the + :ref:`incompatible changes ` and + :ref:`deprecations `. + + And finally, get in touch with us on :issue:`GitHub` if you have + any issues or feature requests for improving Iris. Enjoy! + + +v3.0.1 (27 Jan 2021) +==================== + +.. dropdown:: v3.0.1 Patches + :color: secondary + :icon: alert + :animate: fade-in + + The patches included in this release include: + + 💼 **Internal** + + #.
`@bjlittle`_ gracefully promote formula terms within :mod:`~iris.aux_factory` that have ``units`` of ``unknown`` + to ``units`` of ``1`` (dimensionless), where the formula term **must** have dimensionless ``units``. Without this + graceful treatment of ``units`` the resulting :class:`~iris.cube.Cube` will **not** contain the expected auxiliary + factory, and the associated derived coordinate will be missing. (:pull:`3965`) + + +v3.0.2 (27 May 2021) +==================== + +.. dropdown:: v3.0.2 Patches + :color: secondary + :icon: alert + :animate: fade-in + + The patches included in this release include: + + 🐛 **Bugs Fixed** + + #. `@jonseddon`_ handled a malformed ``um_stash_source`` CF variable attribute in + a netCDF file rather than raising a ``ValueError``. (:pull:`4035`) + + #. `@rcomer`_ fixed :meth:`~iris.cube.Cube.intersection` for special cases + where one cell's bounds align with the requested maximum and minimum, as + reported in :issue:`3391`. (:pull:`4059`) + + #. `@bjlittle`_ resolved a regression in arithmetic behaviour between a coordinate + and a cube which resulted in a ``NotYetImplementedError`` being raised, as reported + in :issue:`4000`. This fix supports ``+``, ``-``, ``*``, and ``/`` operations + between a coordinate and a cube, and for convenience additionally includes + :meth:`iris.cube.Cube.__neg__` support. (:pull:`4159`) + + 📚 **Documentation** + + #. `@bjlittle`_ updated the ``intersphinx_mapping`` and fixed documentation + to use ``stable`` URLs for `matplotlib`_. (:pull:`4003`) [``pre-v3.1.0``] + + 💼 **Internal** + + #. `@jamesp`_ updated a test to the latest numpy version (:pull:`3977`) [``pre-v3.1.0``] + + #. `@bjlittle`_ enabled `cirrus-ci`_ compute credits for non-draft pull-requests + from collaborators targeting the Iris ``master`` branch. (:pull:`4007`) + [``pre-v3.1.0``] + + #. `@bjlittle`_ added conditional task execution to ``.cirrus.yml`` to allow + developers to easily disable `cirrus-ci`_ tasks. (:pull:`4019`) [``pre-v3.1.0``] + + #. `@pp-mo`_ adjusted the use of :func:`dask.array.from_array` in :func:`iris._lazy_data.as_lazy_data`, + to avoid the dask 'test access'. This makes loading of netcdf files with a + large number of variables significantly faster. (:pull:`4135`) + + #. `@pp-mo`_ reverted a change made previously in (:pull:`3659`) to + :meth:`iris.fileformats.pp.PPDataProxy.__getitem__`. The check for empty slicings + is no longer needed since (:pull:`4135`) was added. (:pull:`4141`) + + Note that, the above contributions labelled with ``pre-v3.1.0`` are part of the forthcoming + Iris v3.1.0 release, but require to be included in this patch release. + + +v3.0.3 (07 July 2021) +===================== + +.. dropdown:: v3.0.3 Patches + :color: secondary + :icon: alert + :animate: fade-in + + The patches included in this release include: + + 🐛 **Bugs Fixed** + + #. `@lbdreyer`_ modified :meth:`~iris.cube.Cube.intersection` to use a tolerant + equality check, when looking for cells that straddle the wrapping point. + (:pull:`4220`) + + +v3.0.4 (22 July 2021) +===================== + +.. dropdown:: v3.0.4 Patches + :color: secondary + :icon: alert + :animate: fade-in + + The patches included in this release include: + + 🐛 **Bugs Fixed** + + #. `@pp-mo`_ fixed 2 bugs in cube printout: + Firstly, ancillary-variables or cell-measures with long names can now widen the cube "dimensions map" to fit, + whereas previously printing these cases caused an Exception. 
+ Secondly, cube units are now always printed, whereas previously they were missed out any time that the + "dimensions map" was widened to accommodate long coordinate names. + (:pull:`4233`)(:pull:`4238`) + + 💼 **Internal** + + #. `@bjlittle`_ Unpinned the `cftime`_ package dependency within Iris in order + to allow use of the latest versions of `cftime`_, `cf-units`_ and `nc-time-axis`_. + (:pull:`4222`) + + #. `@rcomer`_ modified test modules so they run consistently under ``pytest`` and + ``nose``, and also fixed some minor issues with :class:`~iris.time.PartialDateTime`. + (:pull:`4249`) + + Note that, we are forced to drop support for ``Python 3.6`` in this patch due to + the third-party package dependencies required by (:pull:`4222`). + + +📢 Announcements +================ + +#. Congratulations to `@bouweandela`_, `@jvegasbsc`_, and `@zklaus`_ who + recently became Iris core developers. They bring a wealth of expertise to the + team, and are using Iris to underpin `ESMValTool`_ - "*A community diagnostic + and performance metrics tool for routine evaluation of Earth system models + in CMIP*". Welcome aboard! 🎉 + +#. Congratulations also goes to `@jonseddon`_ who recently became an Iris core + developer. We look forward to seeing more of your awesome contributions! 🎉 + + +✨ Features +=========== + +#. `@MoseleyS`_ greatly enhanced the :mod:`~iris.fileformats.nimrod` + module to provide richer meta-data translation when loading ``Nimrod`` data + into cubes. This covers most known operational use-cases. (:pull:`3647`) + +#. `@stephenworsley`_ improved the handling of + :class:`iris.coords.CellMeasure`\ s in the :class:`~iris.cube.Cube` + statistical operations :meth:`~iris.cube.Cube.collapsed`, + :meth:`~iris.cube.Cube.aggregated_by` and + :meth:`~iris.cube.Cube.rolling_window`. These previously removed every + :class:`~iris.coords.CellMeasure` attached to the cube. Now, a + :class:`~iris.coords.CellMeasure` will only be removed if it is associated + with an axis over which the statistic is being run. (:pull:`3549`) + +#. `@stephenworsley`_, `@pp-mo`_ and `@abooton`_ added support for + `CF Ancillary Data`_ variables. These are created as + :class:`iris.coords.AncillaryVariable`, and appear as components of cubes + much like :class:`~iris.coords.AuxCoord`\ s, with the new + :class:`~iris.cube.Cube` methods + :meth:`~iris.cube.Cube.add_ancillary_variable`, + :meth:`~iris.cube.Cube.remove_ancillary_variable`, + :meth:`~iris.cube.Cube.ancillary_variable`, + :meth:`~iris.cube.Cube.ancillary_variables` and + :meth:`~iris.cube.Cube.ancillary_variable_dims`. + They are loaded from and saved to NetCDF-CF files. Special support for + `Quality Flags`_ is also provided, to ensure they load and save with + appropriate units. (:pull:`3800`) + +#. `@bouweandela`_ implemented lazy regridding for the + :class:`~iris.analysis.Linear`, :class:`~iris.analysis.Nearest`, and + :class:`~iris.analysis.AreaWeighted` regridding schemes. (:pull:`3701`) + +#. `@bjlittle`_ added `logging`_ support within :mod:`iris.analysis.maths`, + :mod:`iris.common.metadata`, and :mod:`iris.common.resolve`. Each module + defines a :class:`logging.Logger` instance called ``logger`` with a default + ``level`` of ``INFO``. To enable ``DEBUG`` logging use + ``logger.setLevel("DEBUG")``. (:pull:`3785`) + +#. 
`@bjlittle`_ added the :mod:`iris.common.resolve` module, which provides + infrastructure to support the analysis, identification and combination + of metadata common between two :class:`~iris.cube.Cube` operands into a + single resultant :class:`~iris.cube.Cube` that will be auto-transposed, + and with the appropriate broadcast shape. (:pull:`3785`) + +#. `@bjlittle`_ added the :ref:`common metadata API `, which provides + a unified treatment of metadata across Iris, and allows users to easily + manage and manipulate their metadata in a consistent way. (:pull:`3785`) + +#. `@bjlittle`_ added :ref:`lenient metadata ` support, to + allow users to control **strict** or **lenient** metadata equivalence, + difference and combination. (:pull:`3785`) + +#. `@bjlittle`_ added :ref:`lenient cube maths ` support and + resolved several long standing major issues with cube arithmetic regarding + a more robust treatment of cube broadcasting, cube dimension auto-transposition, + and preservation of common metadata and coordinates during cube math operations. + Resolves :issue:`1887`, :issue:`2765`, and :issue:`3478`. (:pull:`3785`) + +#. `@pp-mo`_ and `@TomekTrzeciak`_ enhanced :meth:`~iris.cube.Cube.collapse` to allow a 1-D weights array when + collapsing over a single dimension. + Previously, the weights had to be the same shape as the whole cube, which could cost a lot of memory in some cases. + The 1-D form is supported by most weighted array statistics (such as :meth:`np.average`), so this now works + with the corresponding Iris schemes (in that case, :const:`~iris.analysis.MEAN`). (:pull:`3943`) + + +🐛 Bugs Fixed +============= + +#. `@stephenworsley`_ fixed :meth:`~iris.cube.Cube.remove_coord` to now also + remove derived coordinates by removing aux_factories. (:pull:`3641`) + +#. `@jonseddon`_ fixed ``isinstance(cube, collections.Iterable)`` to now behave + as expected if a :class:`~iris.cube.Cube` is iterated over, while also + ensuring that ``TypeError`` is still raised. (Fixed by setting the + ``__iter__()`` method in :class:`~iris.cube.Cube` to ``None``). + (:pull:`3656`) + +#. `@stephenworsley`_ enabled cube concatenation along an axis shared by cell + measures; these cell measures are now concatenated together in the resulting + cube. Such a scenario would previously cause concatenation to inappropriately + fail. (:pull:`3566`) + +#. `@stephenworsley`_ newly included :class:`~iris.coords.CellMeasure`\ s in + :class:`~iris.cube.Cube` copy operations. Previously copying a + :class:`~iris.cube.Cube` would ignore any attached + :class:`~iris.coords.CellMeasure`. (:pull:`3546`) + +#. `@bjlittle`_ set a :class:`~iris.coords.CellMeasure`'s + ``measure`` attribute to have a default value of ``area``. + Previously, the ``measure`` was provided as a keyword argument to + :class:`~iris.coords.CellMeasure` with a default value of ``None``, which + caused a ``TypeError`` when no ``measure`` was provided, since ``area`` or + ``volume`` are the only accepted values. (:pull:`3533`) + +#. `@trexfeathers`_ set **all** plot types in :mod:`iris.plot` to now use + :obj:`matplotlib.dates.date2num` to format date/time coordinates for use on a plot + axis (previously :meth:`~iris.plot.pcolor` and :meth:`~iris.plot.pcolormesh` + did not include this behaviour). (:pull:`3762`) + +#. 
`@trexfeathers`_ changed date/time axis labels in :mod:`iris.quickplot` to + now **always** be based on the ``epoch`` used in :obj:`matplotlib.dates.date2num` + (previously would take the unit from a time coordinate, if present, even + though the coordinate's value had been changed via ``date2num``). + (:pull:`3762`) + +#. `@pp-mo`_ newly included attributes of cell measures in NETCDF-CF + file loading; they were previously being discarded. They are now available on + the :class:`~iris.coords.CellMeasure` in the loaded :class:`~iris.cube.Cube`. + (:pull:`3800`) + +#. `@pp-mo`_ fixed the netcdf loader to now handle any grid-mapping + variables with missing ``false_easting`` and ``false_northing`` properties, + which was previously failing for some coordinate systems. See :issue:`3629`. + (:pull:`3804`) + +#. `@stephenworsley`_ changed the way tick labels are assigned from string coords. + Previously, the first tick label would occasionally be duplicated. This also + removes the use of the deprecated `matplotlib`_ ``IndexFormatter``. (:pull:`3857`) + +#. `@znicholls`_ fixed :meth:`~iris.quickplot._title` to only check + ``units.is_time_reference`` if the ``units`` symbol is not used. (:pull:`3902`) + +#. `@rcomer`_ fixed a bug whereby numpy array type attributes on a cube's + coordinates could prevent printing it. See :issue:`3921`. (:pull:`3922`) + +.. _whatsnew 3.0 changes: + +💣 Incompatible Changes +======================= + +#. `@pp-mo`_ rationalised :class:`~iris.cube.CubeList` extraction + methods: + + The former method ``iris.cube.CubeList.extract_strict``, and the ``strict`` + keyword of the :meth:`~iris.cube.CubeList.extract` method have been removed, + and are replaced by the new routines :meth:`~iris.cube.CubeList.extract_cube` + and :meth:`~iris.cube.CubeList.extract_cubes`. + The new routines perform the same operation, but in a style more like other + ``Iris`` functions such as :meth:`~iris.load_cube` and :meth:`~iris.load_cubes`. + Unlike ``strict`` extraction, the type of return value is now completely + consistent : :meth:`~iris.cube.CubeList.extract_cube` always returns a + :class:`~iris.cube.Cube`, and :meth:`~iris.cube.CubeList.extract_cubes` + always returns an :class:`iris.cube.CubeList` of a length equal to the + number of constraints. (:pull:`3715`) + +#. `@pp-mo`_ removed the former function + ``iris.analysis.coord_comparison``. (:pull:`3562`) + +#. `@bjlittle`_ moved the + :func:`iris.experimental.equalise_cubes.equalise_attributes` function from + the :mod:`iris.experimental` module into the :mod:`iris.util` module. Please + use the :func:`iris.util.equalise_attributes` function instead. + (:pull:`3527`) + +#. `@bjlittle`_ removed the module ``iris.experimental.concatenate``. In + ``v1.6.0`` the experimental ``concatenate`` functionality was moved to the + :meth:`iris.cube.CubeList.concatenate` method. Since then, calling the + :func:`iris.experimental.concatenate.concatenate` function raised an + exception. (:pull:`3523`) + +#. `@stephenworsley`_ changed the default units of :class:`~iris.coords.DimCoord` + and :class:`~iris.coords.AuxCoord` from `"1"` to `"unknown"`. (:pull:`3795`) + +#. `@stephenworsley`_ changed Iris objects loaded from NetCDF-CF files to have + ``units='unknown'`` where the corresponding NetCDF variable has no ``units`` + property. Previously these cases defaulted to ``units='1'``. 
+ This affects loading of coordinates whose file variable has no "units" + attribute (not valid, under `CF units rules`_): These will now have units + of `"unknown"`, rather than `"1"`, which **may prevent the creation of + a hybrid vertical coordinate**. While these cases used to "work", this was + never really correct behaviour. (:pull:`3795`) + +#. `@SimonPeatman`_ added attribute ``var_name`` to coordinates created by the + :func:`iris.analysis.trajectory.interpolate` function. This prevents + duplicate coordinate errors in certain circumstances. (:pull:`3718`) + +#. `@bjlittle`_ aligned the :func:`iris.analysis.maths.apply_ufunc` with the + rest of the :mod:`iris.analysis.maths` API by changing its keyword argument + from ``other_cube`` to ``other``. (:pull:`3785`) + +#. `@bjlittle`_ changed the :meth:`iris.analysis.maths.IFunc.__call__` to ignore + any surplus ``other`` keyword argument for a ``data_func`` that requires + **only one** argument. This aligns the behaviour of + :meth:`iris.analysis.maths.IFunc.__call__` with + :func:`~iris.analysis.maths.apply_ufunc`. Previously a ``ValueError`` + exception was raised. (:pull:`3785`) + + +.. _whatsnew 3.0 deprecations: + +🔥 Deprecations +=============== + +#. `@stephenworsley`_ removed the deprecated :class:`iris.Future` flags + ``cell_date_time_objects``, ``netcdf_promote``, ``netcdf_no_unlimited`` and + ``clip_latitudes``. (:pull:`3459`) + +#. `@stephenworsley`_ changed :attr:`iris.fileformats.pp.PPField.lbproc` to be an + ``int``. The deprecated attributes ``flag1``, ``flag2`` etc. have been + removed from it. (:pull:`3461`) + +#. `@bjlittle`_ deprecated :func:`~iris.util.as_compatible_shape` in preference + for :class:`~iris.common.resolve.Resolve` e.g., ``Resolve(src, tgt)(tgt.core_data())``. + The :func:`~iris.util.as_compatible_shape` function will be removed in a future + release of Iris. (:pull:`3892`) + + +🔗 Dependencies +=============== + +#. `@stephenworsley`_, `@trexfeathers`_ and `@bjlittle`_ removed ``Python2`` + support, modernising the codebase by switching to exclusive ``Python3`` + support. (:pull:`3513`) + +#. `@bjlittle`_ improved the developer set up process. Configuring Iris and + :ref:`installing_from_source` as a developer with all the required package + dependencies is now easier with our curated conda environment YAML files. + (:pull:`3812`) + +#. `@stephenworsley`_ pinned Iris to require `Dask`_ ``>=2.0``. (:pull:`3460`) + +#. `@stephenworsley`_ and `@trexfeathers`_ pinned Iris to require + `Cartopy`_ ``>=0.18``, in order to remain compatible with the latest version + of `matplotlib`_. (:pull:`3762`) + +#. `@bjlittle`_ unpinned Iris to use the latest version of `matplotlib`_. + Supporting ``Iris`` for both ``Python2`` and ``Python3`` had resulted in + pinning our dependency on `matplotlib`_ at ``v2.x``. But this is no longer + necessary now that ``Python2`` support has been dropped. (:pull:`3468`) + +#. `@stephenworsley`_ and `@trexfeathers`_ unpinned Iris to use the latest version + of `Proj`_. (:pull:`3762`) + +#. `@stephenworsley`_ and `@trexfeathers`_ removed GDAL from the extensions + dependency group. We no longer consider it to be an extension. (:pull:`3762`) + + +.. _whatsnew 3.0 docs: + +📚 Documentation +================ + +#. `@tkknight`_ moved the + :ref:`sphx_glr_generated_gallery_oceanography_plot_orca_projection.py` + from the general part of the gallery to oceanography. (:pull:`3761`) + +#. 
`@tkknight`_ updated documentation to use a modern sphinx theme and be + served from https://scitools-iris.readthedocs.io/en/latest/. (:pull:`3752`) + +#. `@bjlittle`_ added support for the `black`_ code formatter. This is + now automatically checked on GitHub PRs, replacing the older, unittest-based + ``iris.tests.test_coding_standards.TestCodeFormat``. Black provides automatic + code format correction for most IDEs. See the new developer guide section on + :ref:`code_formatting`. (:pull:`3518`) + +#. `@tkknight`_ and `@trexfeathers`_ refreshed the :ref:`whats_new_contributions` + for the :ref:`iris_whatsnew`. This includes always creating the ``latest`` + what's new page so it appears on the latest documentation at + https://scitools-iris.readthedocs.io/en/latest/whatsnew. This resolves + :issue:`2104`, :issue:`3451`, :issue:`3818`, :issue:`3837`. Also updated the + :ref:`iris_development_releases_steps` to follow when making a release. + (:pull:`3769`, :pull:`3838`, :pull:`3843`) + +#. `@tkknight`_ enabled the PDF creation of the documentation on the + `Read the Docs`_ service. The PDF may be accessed by clicking on the version + at the bottom of the side bar, then selecting ``PDF`` from the ``Downloads`` + section. (:pull:`3765`) + +#. `@stephenworsley`_ added a warning to the + :func:`iris.analysis.cartography.project` function regarding its behaviour on + projections with non-rectangular boundaries. (:pull:`3762`) + +#. `@stephenworsley`_ added the :ref:`cube_maths_combining_units` section to the + user guide to clarify how ``Units`` are handled during cube arithmetic. + (:pull:`3803`) + +#. `@tkknight`_ overhauled the :ref:`developers_guide` including information on + getting involved in becoming a contributor and general structure of the + guide. This resolves :issue:`2170`, :issue:`2331`, :issue:`3453`, + :issue:`314`, :issue:`2902`. (:pull:`3852`) + +#. `@rcomer`_ added argument descriptions to the :class:`~iris.coords.DimCoord` + docstring. (:pull:`3681`) + +#. `@tkknight`_ added two url's to be ignored for the ``make linkcheck``. This + will ensure the Iris github project is not repeatedly hit during the + linkcheck for issues and pull requests as it can result in connection + refused and thus travis-ci_ job failures. For more information on linkcheck, + see :ref:`contributing.documentation.testing`. (:pull:`3873`) + +#. `@tkknight`_ enabled the napolean_ package that is used by sphinx_ to cater + for the existing google style docstrings and to also allow for `numpy`_ + docstrings. This resolves :issue:`3841`. (:pull:`3871`) + +#. `@tkknight`_ configured ``sphinx-build`` to promote warnings to errors when + building the documentation via ``make html``. This will minimise technical + debt accruing for the documentation. (:pull:`3877`) + +#. `@tkknight`_ updated :ref:`installing_iris` to include a reference to + Windows Subsystem for Linux. (:pull:`3885`) + +#. `@tkknight`_ updated the :ref:`iris_docs` homepage to include panels so the + links are more visible to users. This uses the sphinx-panels_ extension. + (:pull:`3884`) + +#. `@bjlittle`_ created the :ref:`Further topics ` section and + included documentation for :ref:`metadata`, :ref:`lenient metadata`, and + :ref:`lenient maths`. (:pull:`3890`) + +#. `@jonseddon`_ updated the CF version of the netCDF saver in the + :ref:`saving_iris_cubes` section and in the equivalent function docstring. + (:pull:`3925`) + +#. `@bjlittle`_ applied `Title Case Capitalization`_ to the documentation. 
+ (:pull:`3940`) + + +💼 Internal +=========== + +#. `@pp-mo`_ and `@lbdreyer`_ removed all Iris test dependencies on `iris-grib`_ + by transferring all relevant content to the `iris-grib`_ repository. (:pull:`3662`, + :pull:`3663`, :pull:`3664`, :pull:`3665`, :pull:`3666`, :pull:`3669`, + :pull:`3670`, :pull:`3671`, :pull:`3672`, :pull:`3742`, :pull:`3746`) + +#. `@lbdreyer`_ and `@pp-mo`_ overhauled the handling of dimensional + metadata to remove duplication. (:pull:`3422`, :pull:`3551`) + +#. `@trexfeathers`_ simplified the standard license header for all files, which + removes the need to repeatedly update year numbers in the header. + (:pull:`3489`) + +#. `@stephenworsley`_ changed the numerical values in tests involving the + Robinson projection due to improvements made in + `Proj`_. (:pull:`3762`) (see also `Proj#1292`_ and `Proj#2151`_) + +#. `@stephenworsley`_ changed tests to account for more detailed descriptions of + projections in `GDAL`_. (:pull:`3762`) (see also `GDAL#1185`_) + +#. `@stephenworsley`_ changed tests to account for `GDAL`_ now saving fill values + for data without masked points. (:pull:`3762`) + +#. `@trexfeathers`_ changed every graphics test that includes `Cartopy's coastlines`_ + to account for new adaptive coastline scaling. (:pull:`3762`) + (see also `Cartopy#1105`_) + +#. `@trexfeathers`_ changed graphics tests to account for some new default + grid-line spacing in `Cartopy`_. (:pull:`3762`) (see also `Cartopy#1117`_) + +#. `@trexfeathers`_ added additional acceptable graphics test targets to account + for very minor changes in `matplotlib`_ version ``3.3`` (colormaps, fonts and + axes borders). (:pull:`3762`) + +#. `@rcomer`_ corrected the `matplotlib`_ backend in Iris tests to ignore + :obj:`matplotlib.rcdefaults`, instead the tests will **always** use ``agg``. + (:pull:`3846`) + +#. `@bjlittle`_ migrated the `black`_ support from ``19.10b0`` to ``20.8b1``. + (:pull:`3866`) + +#. `@lbdreyer`_ updated the CF standard name table to the latest version: `v75`_. + (:pull:`3867`) + +#. `@bjlittle`_ added :pep:`517` and :pep:`518` support for building and + installing Iris, in particular to handle the `PyKE`_ package dependency. + (:pull:`3812`) + +#. `@bjlittle`_ added metadata support for comparing :attr:`~iris.cube.Cube.attributes` + dictionaries that contain `numpy`_ arrays using `xxHash`_, an extremely fast + non-cryptographic hash algorithm, running at RAM speed limits. + +#. `@bjlittle`_ added the ``iris.tests.assertDictEqual`` method to override + :meth:`unittest.TestCase.assertDictEqual` in order to cope with testing + metadata :attr:`~iris.cube.Cube.attributes` dictionary comparison where + the value of a key may be a `numpy`_ array. (:pull:`3785`) + +#. `@bjlittle`_ added the :func:`~iris.config.get_logger` function for creating + a generic :class:`logging.Logger` with a :class:`logging.StreamHandler` and + custom :class:`logging.Formatter`. (:pull:`3785`) + +#. `@owena11`_ identified and optimised a bottleneck in ``FieldsFile`` header + loading due to the use of :func:`numpy.fromfile`. (:pull:`3791`) + +#. `@znicholls`_ added a test for plotting with the label being taken from the unit's symbol, + see :meth:`~iris.tests.test_quickplot.TestLabels.test_pcolormesh_str_symbol` (:pull:`3902`). + +#. `@znicholls`_ made :func:`~iris.tests.idiff.step_over_diffs` robust to hyphens (``-``) in + the input path (i.e. the ``result_dir`` argument) (:pull:`3902`). + +#. 
`@bjlittle`_ migrated the CIaaS from `travis-ci`_ to `cirrus-ci`_, and removed `stickler-ci`_ + support. (:pull:`3928`) + +#. `@bjlittle`_ introduced `nox`_ as a common and easy entry-point for test automation. + It can be used both from `cirrus-ci`_ in the cloud, and locally by the developer to + run the Iris tests, the doc-tests, the gallery doc-tests, and lint Iris + with `flake8`_ and `black`_. (:pull:`3928`) + +.. _Read the Docs: https://scitools-iris.readthedocs.io/en/latest/ +.. _CF units rules: https://cfconventions.org/Data/cf-conventions/cf-conventions-1.8/cf-conventions.html#units +.. _CF Ancillary Data: https://cfconventions.org/Data/cf-conventions/cf-conventions-1.8/cf-conventions.html#ancillary-data +.. _Quality Flags: https://cfconventions.org/Data/cf-conventions/cf-conventions-1.8/cf-conventions.html#flags +.. _iris-grib: https://github.com/SciTools/iris-grib +.. _Cartopy: https://github.com/SciTools/cartopy +.. _Cartopy's coastlines: https://scitools.org.uk/cartopy/docs/latest/reference/generated/cartopy.mpl.geoaxes.GeoAxes.html?highlight=coastlines#cartopy.mpl.geoaxes.GeoAxes.coastlines +.. _Cartopy#1105: https://github.com/SciTools/cartopy/pull/1105 +.. _Cartopy#1117: https://github.com/SciTools/cartopy/pull/1117 +.. _Dask: https://github.com/dask/dask +.. _Proj: https://github.com/OSGeo/PROJ +.. _black: https://black.readthedocs.io/en/stable/ +.. _Proj#1292: https://github.com/OSGeo/PROJ/pull/1292 +.. _Proj#2151: https://github.com/OSGeo/PROJ/pull/2151 +.. _GDAL: https://github.com/OSGeo/gdal +.. _GDAL#1185: https://github.com/OSGeo/gdal/pull/1185 +.. _@MoseleyS: https://github.com/MoseleyS +.. _@stephenworsley: https://github.com/stephenworsley +.. _@pp-mo: https://github.com/pp-mo +.. _@abooton: https://github.com/abooton +.. _@bouweandela: https://github.com/bouweandela +.. _@bjlittle: https://github.com/bjlittle +.. _@trexfeathers: https://github.com/trexfeathers +.. _@jonseddon: https://github.com/jonseddon +.. _@tkknight: https://github.com/tkknight +.. _@lbdreyer: https://github.com/lbdreyer +.. _@SimonPeatman: https://github.com/SimonPeatman +.. _@TomekTrzeciak: https://github.com/TomekTrzeciak +.. _@rcomer: https://github.com/rcomer +.. _@jvegasbsc: https://github.com/jvegasbsc +.. _@zklaus: https://github.com/zklaus +.. _@znicholls: https://github.com/znicholls +.. _ESMValTool: https://github.com/ESMValGroup/ESMValTool +.. _v75: https://cfconventions.org/Data/cf-standard-names/75/build/cf-standard-name-table.html +.. _sphinx-panels: https://sphinx-panels.readthedocs.io/en/latest/ +.. _logging: https://docs.python.org/3/library/logging.html +.. _numpy: https://github.com/numpy/numpy +.. _xxHash: https://github.com/Cyan4973/xxHash +.. _PyKE: https://pypi.org/project/scitools-pyke/ +.. _@owena11: https://github.com/owena11 +.. _readthedocs: https://readthedocs.org/ +.. _CF Conventions and Metadata: https://cfconventions.org/ +.. _flake8: https://flake8.pycqa.org/en/stable/ +.. _nox: https://nox.thea.codes/en/stable/ +.. _Title Case Capitalization: https://apastyle.apa.org/style-grammar-guidelines/capitalization/title-case +.. _travis-ci: https://travis-ci.org/github/SciTools/iris +.. _stickler-ci: https://stickler-ci.com/ +.. _cf-units: https://github.com/SciTools/cf-units +.. _cftime: https://github.com/Unidata/cftime +.. _nc-time-axis: https://github.com/SciTools/nc-time-axis diff --git a/docs/src/whatsnew/3.1.rst b/docs/src/whatsnew/3.1.rst new file mode 100644 index 0000000000..02e06bb532 --- /dev/null +++ b/docs/src/whatsnew/3.1.rst @@ -0,0 +1,325 @@ +.. 
include:: ../common_links.inc
+
+v3.1 (17 Sep 2021)
+******************
+
+This document explains the changes made to Iris for this release
+(:doc:`View all changes <index>`.)
+
+
+.. dropdown:: v3.1.0 Release Highlights
+   :color: primary
+   :icon: info
+   :animate: fade-in
+   :open:
+
+   The highlights for this minor release of Iris include:
+
+   * Much faster import times, from minimised loading of submodules.
+     However, note that this may break existing code which does not declare all imports.
+     See :ref:`minimised imports <minimised_imports>`.
+   * Speedup for loading single phenomena from netcdf. See :ref:`netcdf load speedup <fast_nc_load>`.
+   * We've dropped support for `Python 3.6`_. See :ref:`minimum Python version 3.7 <no_py36>`.
+   * Updated formatting of cube printouts. See :ref:`new-style cube printouts <newstyle_cube_print>`.
+   * Multiple improvements to developer guide documentation.
+     See entries in the :ref:`"Documentation" section <docs_section>`, below.
+
+   And finally, get in touch with us on :issue:`GitHub<new/choose>` if you have
+   any issues or feature requests for improving Iris. Enjoy!
+
+
+📢 Announcements
+================
+
+#. Congratulations to `@jamesp`_ who recently became an Iris core developer
+   after joining the Iris development team at the `Met Office`_. 🎉
+
+#. A special thanks goes to `@akuhnregnier`_, `@gcaria`_, `@jamesp`_, `@schlunma`_, `@MHBalsmeier`_
+   and `@Badboy-16`_ all of whom made their first contributions to Iris, which
+   were gratefully received and included in this release. Keep up the awesome
+   work! 🍻
+
+
+✨ Features
+===========
+
+#. `@pelson`_ and `@trexfeathers`_ enhanced :meth:`iris.plot.plot` and
+   :meth:`iris.quickplot.plot` to automatically place the cube on the x axis if
+   the primary coordinate being plotted against is a vertical coordinate. E.g.
+   ``iris.plot.plot(z_cube)`` will produce a z-vs-phenomenon plot, where before
+   it would have produced a phenomenon-vs-z plot. (:pull:`3906`)
+
+#. `@jonseddon`_ added :meth:`iris.plot.barbs` to provide a convenient way to
+   use :func:`matplotlib.pyplot.barbs` with Iris cubes. A gallery example was
+   included to illustrate the new method's use. (:pull:`3710`)
+
+#. `@bjlittle`_ introduced :func:`iris.common.metadata.hexdigest` to the
+   public API. Previously it was a private function introduced in ``v3.0.0``.
+   Given any object, :func:`~iris.common.metadata.hexdigest` returns a string
+   representation of the 64-bit non-cryptographic hash of the object using the
+   extremely fast `xxhash`_ hashing algorithm. (:pull:`4020`)
+
+#. `@rcomer`_ implemented a ``__str__`` method for metadata classes, so
+   printing these objects skips metadata elements that are set to None or an
+   empty string or dictionary. (:pull:`4040`)
+
+#. `@Badboy-16`_ implemented a ``CubeList.copy()`` method to return a
+   ``CubeList`` object instead of a ``list``. (:pull:`4094`)
+
+   .. _newstyle_cube_print:
+
+#. `@pp-mo`_ and `@trexfeathers`_ reformatted :meth:`iris.cube.Cube.summary`
+   (which is used for ``print(Cube)``), putting
+   :attr:`~iris.cube.Cube.cell_methods` before
+   :attr:`~iris.cube.Cube.attributes`, and improving spacing throughout.
+   (:pull:`4206`)
+
+#. `@schlunma`_ added support for loading atmosphere sigma coordinates from
+   netcdf-CF files. These now load as
+   :class:`iris.aux_factory.AtmosphereSigmaFactory` derived coordinates. (:pull:`4052`)
+
+
+🐛 Bugs Fixed
+=============
+
+#. `@gcaria`_ fixed :class:`~iris.coords.Cell` comparison with
+   0-dimensional arrays and 1-dimensional arrays with len=1. (:pull:`4083`)
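+
+   As a minimal sketch of the fixed behaviour (the values here are
+   illustrative only), both comparisons now evaluate to a plain bool
+   instead of misbehaving::
+
+      import numpy as np
+      from iris.coords import Cell
+
+      Cell(273.15) == np.array(273.15)    # 0-dimensional array
+      Cell(273.15) == np.array([273.15])  # 1-dimensional array, len=1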
+
+#. `@gcaria`_ fixed :meth:`~iris.cube.Cube.cell_measure_dims` to also accept the
+   string name of a :class:`~iris.coords.CellMeasure`. (:pull:`3931`)
+
+#. `@gcaria`_ fixed :meth:`~iris.cube.Cube.ancillary_variable_dims` to also accept
+   the string name of a :class:`~iris.coords.AncillaryVariable`. (:pull:`3931`)
+
+#. `@rcomer`_ modified :func:`~iris.plot.contourf` to skip the special handling for
+   antialiasing when data values are too low for it to have an effect. This caused
+   unexpected artifacts in some edge cases, as shown at :issue:`4086`. (:pull:`4150`)
+
+#. `@MHBalsmeier`_ modified :func:`~iris.plot.contourf` to generalise the fix from
+   :pull:`4150` to the cases where NaN values occur in the plot array. (:pull:`4263`)
+
+#. `@trexfeathers`_ fixed the "anomaly_log_colouring" gallery example to be
+   compatible with the latest Matplotlib usage. (:pull:`4115`)
+
+
+🚀 Performance Enhancements
+===========================
+
+.. _isort_adopt:
+
+#. `@bjlittle`_ added support for automated ``import`` linting with `isort`_, which
+   also includes significant speed-ups for Iris imports. (:pull:`4174`)
+
+#. `@bjlittle`_ optimised the creation of dynamic metadata manager classes within the
+   :func:`~iris.common.metadata.metadata_manager_factory`, resulting in a significant
+   speed-up in the creation of Iris :class:`~iris.coords.AncillaryVariable`,
+   :class:`~iris.coords.AuxCoord`, :class:`~iris.coords.CellMeasure`, and
+   :class:`~iris.cube.Cube` instances. (:pull:`4227`)
+
+   .. _fast_nc_load:
+
+#. `@pp-mo`_ and `@lbdreyer`_ optimised loading netcdf files, resulting in a
+   speed-up when loading with a single :class:`~iris.NameConstraint`.
+
+   For example, ``cube = iris.load('x.nc', NameConstraint('air_temperature'))``.
+
+   Note that this optimisation only applies when matching on standard name, long name or
+   NetCDF variable name, not when matching on STASH.
+   (:pull:`4176`)
+
+💣 Incompatible Changes
+=======================
+
+.. _minimised_imports:
+
+#. The :ref:`adoption of 'isort' <isort_adopt>` has significantly reduced the import
+   time of Iris packages.
+
+   However, this may break existing code which, for convenience, relies on some
+   subpackages being imported implicitly (as some, but not all, previously were).
+
+   For example: ``import iris; print(iris.cube.Cube)``.
+
+   This style is essentially unsafe, and in this case no longer works. It must be
+   modified to explicitly import all subpackages,
+   i.e. ``import iris.cube; print(iris.cube.Cube)``.
+
+
+🔗 Dependencies
+===============
+
+#. `@bjlittle`_ dropped both `black`_ and `flake8`_ package dependencies
+   from our `conda`_ YAML and ``setup.cfg`` PyPI requirements. (:pull:`4181`)
+
+#. `@pp-mo`_ removed dependency on `PyKE`_. (:pull:`4198`)
+
+.. _docs_section:
+
+📚 Documentation
+================
+
+#. `@rcomer`_ updated the "Seasonal ensemble model plots" and "Global average
+   annual temperature maps" Gallery examples. (:pull:`3933` and :pull:`3934`)
+
+#. `@MHBalsmeier`_ described non-conda installation on Debian-based distros.
+   (:pull:`3958`)
+
+#. `@bjlittle`_ clarified in the doc-string that :class:`~iris.coords.Coord`
+   is now an `abstract base class`_ since Iris ``3.0.0``, and it is **not**
+   possible to create an instance of it. (:pull:`3971`)
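+
+   A minimal sketch of what this means in practice (the points values here
+   are arbitrary)::
+
+      from iris.coords import AuxCoord, Coord
+
+      Coord(points=[0, 1])     # raises TypeError: abstract class
+      AuxCoord(points=[0, 1])  # use a concrete subclass instead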
+
+#. `@bjlittle`_ added automated Iris version discovery for the ``latest.rst``
+   in the ``whatsnew`` documentation. (:pull:`3981`)
+
+#. `@tkknight`_ stated the Python version used to build the documentation
+   on :ref:`installing_iris` and to the footer of all pages. Also added the
+   copyright years to the footer. (:pull:`3989`)
+
+#. `@bjlittle`_ updated the ``intersphinx_mapping`` and fixed documentation
+   to use ``stable`` URLs for `matplotlib`_. (:pull:`4003`)
+
+#. `@bjlittle`_ added the |PyPI|_ badge to the `README.md`_. (:pull:`4004`)
+
+#. `@tkknight`_ added a banner at the top of every page of the unreleased
+   development documentation if being viewed on `Read the Docs`_.
+   (:pull:`3999`)
+
+#. `@bjlittle`_ added post-release instructions on how to :ref:`update_pypi`
+   with `scitools-iris`_. (:pull:`4038`)
+
+#. `@bjlittle`_ added the |pre-commit.ci|_ badge to the `README.md`_.
+   See :ref:`pre_commit_ci` for further details. (:pull:`4061`)
+
+#. `@rcomer`_ tweaked docstring layouts in the :mod:`iris.plot` module, so
+   they render better in the published documentation. See :issue:`4085`.
+   (:pull:`4100`)
+
+#. `@tkknight`_ documented the ``--force`` command line option when creating
+   a conda development environment. See :ref:`installing_from_source`.
+   (:pull:`4240`)
+
+#. `@MHBalsmeier`_ updated and simplified non-conda installation on Debian-based distros.
+   (:pull:`4260`)
+
+
+💼 Internal
+===========
+
+#. `@rcomer`_ removed an old unused test file. (:pull:`3913`)
+
+#. `@tkknight`_ moved the ``docs/iris`` directory to be in the parent
+   directory ``docs``. (:pull:`3975`)
+
+#. `@jamesp`_ updated a test for `numpy`_ ``1.20.0``. (:pull:`3977`)
+
+#. `@bjlittle`_ and `@jamesp`_ extended the `cirrus-ci`_ testing and `nox`_
+   testing automation to support `Python 3.8`_. (:pull:`3976`)
+
+#. `@bjlittle`_ rationalised the ``noxfile.py``, and added the ability for
+   each ``nox`` session to list its ``conda`` environment packages and
+   environment info. (:pull:`3990`)
+
+#. `@bjlittle`_ enabled `cirrus-ci`_ compute credits for non-draft pull-requests
+   from collaborators targeting the Iris ``main`` branch. (:pull:`4007`)
+
+#. `@akuhnregnier`_ replaced `deprecated numpy 1.20 aliases for builtin types`_.
+   (:pull:`3997`)
+
+#. `@bjlittle`_ added conditional task execution to ``.cirrus.yml`` to allow
+   developers to easily disable `cirrus-ci`_ tasks. (:pull:`4019`)
+
+#. `@bjlittle`_ and `@jamesp`_ addressed a regression in behaviour when using
+   `conda`_ 4.10.0 within `cirrus-ci`_. (:pull:`4084`)
+
+#. `@bjlittle`_ updated the perceptual imagehash graphical test support for
+   `matplotlib`_ 3.4.1. (:pull:`4087`)
+
+#. `@jamesp`_ switched `cirrus-ci`_ testing and `nox`_
+   testing to use `conda-lock`_ files for static test environments. (:pull:`4108`)
+
+#. `@bjlittle`_ updated the ``bug-report`` and ``feature-request`` GitHub issue
+   templates to remove an external URL reference that caused un-posted user issue
+   content to be lost in the browser when followed. (:pull:`4147`)
+
+   .. _no_py36:
+
+#. `@bjlittle`_ dropped `Python 3.6`_ support, and automated the discovery of
+   supported Python versions tested by `cirrus-ci`_ for documentation.
+   (:pull:`4163`)
+
+#. `@bjlittle`_ refactored ``setup.py`` into ``setup.cfg``. (:pull:`4168`)
+
+#. `@bjlittle`_ consolidated the ``.flake8`` configuration into ``setup.cfg``.
+   (:pull:`4200`)
+
+#. `@bjlittle`_ renamed ``iris/master`` branch to ``iris/main`` and migrated
+   references of ``master`` to ``main`` within codebase. (:pull:`4202`)
+
+#. `@bjlittle`_ added the `blacken-docs`_ ``pre-commit`` hook to automate
+   ``black`` linting of documentation code blocks. (:pull:`4205`)
+
+#. `@bjlittle`_ consolidated `nox`_ ``black``, ``flake8`` and ``isort`` sessions
+   into one ``lint`` session using ``pre-commit``. (:pull:`4181`)
+
+#. `@bjlittle`_ streamlined the `cirrus-ci`_ testing by removing the ``minimal``
+   tests, which are a subset of the ``full`` tests. (:pull:`4218`)
+
+#. `@bjlittle`_ consolidated the `cirrus-ci`_ documentation ``doctest`` and
+   ``gallery`` tasks into a single task and associated `nox`_ session.
+   (:pull:`4219`)
+
+#. `@jamesp`_ and `@trexfeathers`_ implemented a benchmarking CI check
+   using `asv`_. (:pull:`4253`)
+
+#. `@pp-mo`_ and `@stephenworsley`_ refactored almost all of :meth:`iris.cube.Cube.summary` into the
+   new private module: :mod:`iris._representation`; rewritten with a more
+   modular approach, resulting in more readable and extensible code.
+   (:pull:`3987`) (:pull:`4206`)
+
+#. `@pp-mo`_ reworked the netcdf loading code, replacing Pyke rules with a pure Python implementation.
+   (:pull:`4198`)
+
+#. `@lbdreyer`_ updated the CF standard name table to the latest version:
+   `v77 <https://cfconventions.org/Data/cf-standard-names/77/build/cf-standard-name-table.html>`_.
+   (:pull:`4282`)
+
+
+.. comment
+    Whatsnew author names (@github name) in alphabetical order. Note that,
+    core dev names are automatically included by the common_links.inc:
+
+.. _@akuhnregnier: https://github.com/akuhnregnier
+.. _@Badboy-16: https://github.com/Badboy-16
+.. _@gcaria: https://github.com/gcaria
+.. _@MHBalsmeier: https://github.com/MHBalsmeier
+.. _@schlunma: https://github.com/schlunma
+
+.. comment
+    Whatsnew resources in alphabetical order:
+
+.. _abstract base class: https://docs.python.org/3/library/abc.html
+.. _asv: https://asv.readthedocs.io/en/stable/
+.. _blacken-docs: https://github.com/asottile/blacken-docs
+.. _conda-lock: https://github.com/conda-incubator/conda-lock
+.. _deprecated numpy 1.20 aliases for builtin types: https://numpy.org/doc/1.20/release/1.20.0-notes.html#using-the-aliases-of-builtin-types-like-np-int-is-deprecated
+.. _Met Office: https://www.metoffice.gov.uk/
+.. _numpy: https://numpy.org/doc/stable/release/1.20.0-notes.html
+.. |pre-commit.ci| image:: https://results.pre-commit.ci/badge/github/SciTools/iris/main.svg
+.. _pre-commit.ci: https://results.pre-commit.ci/latest/github/SciTools/iris/main
+.. _PyKE: https://pypi.org/project/scitools-pyke/
+.. |PyPI| image:: https://img.shields.io/pypi/v/scitools-iris?color=orange&label=pypi%7Cscitools-iris
+.. _PyPI: https://pypi.org/project/scitools-iris/
+.. _Python 3.6: https://www.python.org/downloads/release/python-360/
+.. _Python 3.8: https://www.python.org/downloads/release/python-380/
+.. _README.md: https://github.com/SciTools/iris#-----
+.. _xxhash: https://cyan4973.github.io/xxHash/
diff --git a/docs/src/whatsnew/3.10.rst b/docs/src/whatsnew/3.10.rst
new file mode 100644
index 0000000000..9007f6f9a6
--- /dev/null
+++ b/docs/src/whatsnew/3.10.rst
@@ -0,0 +1,245 @@
+.. include:: ../common_links.inc
+
+v3.10 (13 Aug 2024)
+*******************
+
+This document explains the changes made to Iris for this release
+(:doc:`View all changes <index>`.)
+
+
+.. dropdown:: v3.10 Release Highlights
+   :color: primary
+   :icon: info
+   :animate: fade-in
+   :open:
+
+   The highlights for this minor release of Iris include:
+
+   * Breaking Change: We have moved all of the mesh API from
+     :mod:`iris.experimental.ugrid` to :mod:`iris.mesh`. This is no longer
+     experimental, making this public, supported API.
+
+   * We have made a suite of mesh improvements; there is a separate entry
+     below for each of these changes.
+
+   * We have made :meth:`~iris.coords.Coord.guess_bounds` capable of setting
+     bounds to the start and end of months and years.
+
+   * We have significantly reduced warning noise during NetCDF loading. The
+     datum :class:`python:FutureWarning` will now only be raised if the
+     ``datum_support`` :class:`~iris.Future` flag is disabled AND a datum is
+     present on the loaded NetCDF grid mapping.
+
+   * Check out the performance enhancements section for an array of improvements
+     to the performance of Iris. Special thanks to the `ESMValTool`_ devs for
+     these contributions.
+
+
+   And finally, get in touch with us on :issue:`GitHub<new/choose>` if you have
+   any issues or feature requests for improving Iris. Enjoy!
+
+
+📢 Announcements
+================
+
+#. Breaking Change: We have moved all of the mesh API from :mod:`iris.experimental.ugrid` to
+   :mod:`iris.mesh`. This is no longer experimental, making this public, supported API.
+   Future changes will honour Semantic Versioning - i.e. breaking changes will only be in major releases,
+   and ideally will be previewed via :class:`iris.Future` flags.
+
+#. Note that Iris is currently pinned to NumPy ``<2``; we hope to unpin this in the next minor release (Iris v3.11).
+
+
+✨ Features
+===========
+
+#. `@ESadek-MO`_ updated the error messages in :meth:`iris.cube.CubeList.concatenate`
+   to better explain the error. (:pull:`6005`)
+
+#. `@trexfeathers`_ added the
+   :meth:`~iris.experimental.ugrid.mesh.MeshCoord.collapsed` method to
+   :class:`~iris.experimental.ugrid.mesh.MeshCoord`, enabling collapsing of
+   the :class:`~iris.cube.Cube` :attr:`~iris.cube.Cube.mesh_dim` (see
+   :ref:`cube-statistics-collapsing`). (:issue:`5377`, :pull:`6003`)
+
+#. `@pp-mo`_ made a MeshCoord inherit a coordinate system from its location coord,
+   as it does its metadata. N.B. mesh location coords cannot, however, load a
+   coordinate system from netcdf at present, as this needs the 'extended'
+   grid-mapping syntax -- see :issue:`3388`.
+   (:issue:`5562`, :pull:`6016`)
+
+#. `@HGWright`_ added the ``monthly`` and ``yearly`` options to the
+   :meth:`~iris.coords.Coord.guess_bounds` method. (:issue:`4864`, :pull:`6090`)
+
+#. `@HGWright`_ updated to the latest CF Standard Names Table v85
+   (30 July 2024). (:pull:`6100`)
+
+
+🐛 Bugs Fixed
+=============
+
+#. `@bouweandela`_ updated the ``chunktype`` of Dask arrays, so it corresponds
+   to the array content. (:pull:`5801`)
+
+#. `@rcomer`_ made the :obj:`~iris.analysis.WPERCENTILE` aggregator work with
+   :func:`~iris.cube.Cube.rolling_window`. (:issue:`5777`, :pull:`5825`)
+
+#. `@pp-mo`_ corrected the use of mesh dimensions when saving with multiple
+   meshes. (:issue:`5908`, :pull:`6004`)
+
+#. `@trexfeathers`_ fixed the datum :class:`python:FutureWarning` to only be raised if
+   the ``datum_support`` :class:`~iris.Future` flag is disabled AND a datum is
+   present on the loaded NetCDF grid mapping. (:issue:`5749`, :pull:`6050`)
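+
+   A minimal sketch of the opt-in (the filename is hypothetical, standing in
+   for any NetCDF file whose grid mapping defines a datum)::
+
+      import iris
+
+      iris.FUTURE.datum_support = True  # honour the datum; no FutureWarning
+      cube = iris.load_cube("projected.nc")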
+
+
+💣 Incompatible Changes
+=======================
+
+#. `@rcomer`_ removed the *target* parameter from
+   :func:`~iris.fileformats.pp.as_fields` and
+   :func:`~iris.fileformats.pp.save_pairs_from_cube` because it had no effect.
+   (:pull:`5783`)
+
+#. `@stephenworsley`_ made masked arrays on Iris objects now compare as equal
+   precisely when all unmasked points are equal and when the masks are identical.
+   This is due to changes in :func:`~iris.util.array_equal` which previously
+   ignored masks entirely. (:pull:`4457`)
+
+#. `@trexfeathers`_ renamed the ``Mesh`` class to
+   :class:`~iris.experimental.ugrid.mesh.MeshXY`, in preparation for a future
+   more flexible parent class (:class:`~iris.experimental.ugrid.mesh.Mesh`).
+   (:issue:`6052`, :pull:`6056`)
+
+#. `@stephenworsley`_ replaced the ``include_nodes``, ``include_edges`` and
+   ``include_faces`` arguments with a single ``location`` argument in the
+   :class:`~iris.experimental.ugrid.Mesh` methods
+   :meth:`~iris.experimental.ugrid.Mesh.coord`, :meth:`~iris.experimental.ugrid.Mesh.coords`
+   and :meth:`~iris.experimental.ugrid.Mesh.remove_coords`. (:pull:`6055`)
+
+#. `@pp-mo`_ moved all the mesh API from the :mod:`iris.experimental.ugrid` module
+   to :mod:`iris.mesh`, making this public, supported API. Note that the
+   :class:`iris.experimental.ugrid.Mesh` class is renamed as :class:`iris.mesh.MeshXY`,
+   to allow for possible future mesh types with different properties to exist as
+   subclasses of a common generic :class:`~iris.mesh.components.Mesh` class.
+   (:issue:`6057`, :pull:`6061`, :pull:`6077`)
+
+#. `@pp-mo`_ and `@stephenworsley`_ turned on UGRID loading by default, effectively removing
+   the need for, and deprecating, the :obj:`~iris.experimental.ugrid.PARSE_UGRID_ON_LOAD`
+   context manager. (:pull:`6054`, :pull:`6088`)
+
+
+🚀 Performance Enhancements
+===========================
+
+#. `@bouweandela`_ added the option to specify the Dask chunks of the target
+   array in :func:`iris.util.broadcast_to_shape`. (:pull:`5620`)
+
+#. `@schlunma`_ allowed :func:`iris.analysis.cartography.area_weights` to
+   return dask arrays with arbitrary chunks. (:pull:`5658`)
+
+#. `@bouweandela`_ made :meth:`iris.cube.Cube.rolling_window` work with lazy
+   data. (:pull:`5795`)
+
+#. `@bouweandela`_ updated :meth:`iris.cube.CubeList.concatenate` so it keeps
+   ancillary variables and cell measures lazy. (:pull:`6010`)
+
+#. `@bouweandela`_ made :meth:`iris.cube.CubeList.concatenate` faster for cubes
+   that have coordinate factories. (:pull:`6038`)
+
+🔥 Deprecations
+===============
+
+None!
+
+
+🔗 Dependencies
+===============
+
+#. `@tkknight`_ removed the pin for ``sphinx <=5.3``, so the latest should
+   now be used, currently being v7.2.6.
+   (:pull:`5901`)
+
+#. `@trexfeathers`_ updated the :mod:`iris.experimental.geovista`
+   documentation's use of :class:`geovista.geodesic.BBox`
+   to be compatible with GeoVista v0.5, as well as previous versions.
+   (:pull:`6064`)
+
+#. `@pp-mo`_ temporarily pinned matplotlib to ">=3.5, !=3.9.1", to avoid current CI
+   test failures on plot results, apparently due to a matplotlib bug.
+   See https://github.com/matplotlib/matplotlib/issues/28567
+   (:pull:`6065`)
+
+#. Note that Iris is currently pinned to NumPy ``<2``; we hope to unpin this in the next minor release (Iris v3.11).
+
+
+📚 Documentation
+================
+
+#. `@hsteptoe`_ added more detailed examples to :class:`~iris.cube.Cube` functions
+   :func:`~iris.cube.Cube.slices` and :func:`~iris.cube.Cube.slices_over`. (:pull:`5735`)
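+
+   As a flavour of the pattern those examples document (a sketch; ``cube``
+   stands for any cube with a ``time`` dimension)::
+
+      # one sub-cube per time value, iterating over the time dimension
+      for sub_cube in cube.slices_over("time"):
+          print(sub_cube.summary(shorten=True))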
+
+
+💼 Internal
+===========
+
+#. `@bouweandela`_ removed a workaround in :meth:`~iris.cube.CubeList.merge` for an
+   issue with :func:`dask.array.stack` which has been solved since 2017. (:pull:`5923`)
+
+#. `@trexfeathers`_ introduced a temporary fix for Airspeed Velocity's
+   deprecated use of the ``conda --force`` argument. To be removed once
+   `airspeed-velocity/asv#1397`_ is merged and released. (:pull:`5931`)
+
+#. `@trexfeathers`_ created :func:`iris.tests.stock.realistic_4d_w_everything`;
+   providing a :class:`~iris.cube.Cube` aimed to exercise as much of Iris as
+   possible. (:pull:`5949`)
+
+#. `@trexfeathers`_ deactivated any small 'unit-style' benchmarks for default
+   benchmark runs, and introduced larger, more 'real world' benchmarks where
+   coverage was needed. (:pull:`5949`)
+
+#. `@trexfeathers`_ made a Nox ``benchmarks`` session the recommended entry
+   point for running benchmarks. (:pull:`5951`)
+
+#. `@ESadek-MO`_ added further benchmarks for aggregation and collapse.
+   (:pull:`5954`)
+
+#. `@trexfeathers`_ set the benchmark data generation environment to
+   automatically install iris-test-data during setup. (:pull:`5958`)
+
+#. `@pp-mo`_ reworked benchmark peak-memory measurement to use the
+   `tracemalloc <https://docs.python.org/3/library/tracemalloc.html>`_
+   package.
+   (:pull:`5948`)
+
+#. `@pp-mo`_ added a benchmark 'trialrun' sub-command, to quickly test
+   benchmarks during development. (:pull:`5957`)
+
+#. `@pp-mo`_ moved several memory-measurement benchmarks from 'on-demand' to
+   the standard set, in hopes that use of 'tracemalloc' (:pull:`5948`) makes
+   the results consistent enough to monitor for performance changes.
+   (:pull:`5959`)
+
+#. `@rcomer`_ made some :meth:`~iris.cube.Cube.slices_over` tests go faster.
+   (:pull:`5973`)
+
+#. `@bouweandela`_ enabled mypy checks for type hints.
+   The entire team would like to thank Bouwe for putting in the hard
+   work on an unglamorous but highly valuable contribution. (:pull:`5956`)
+
+#. `@trexfeathers`_ re-wrote the delegated ASV environment plugin to reduce
+   complexity, remove unnecessary slow operations, apply the least-surprise
+   principle, be more robust against failures, and improve the ability to
+   benchmark historic commits (especially older Python versions).
+   (:pull:`5963`)
+
+#. `@bouweandela`_ made some tests for :func:`~iris.iterate.izip` faster. (:pull:`6041`)
+
+.. comment
+    Whatsnew author names (@github name) in alphabetical order. Note that,
+    core dev names are automatically included by the common_links.inc:
+
+.. _@hsteptoe: https://github.com/hsteptoe
+
+
+.. comment
+    Whatsnew resources in alphabetical order:
+
+.. _airspeed-velocity/asv#1397: https://github.com/airspeed-velocity/asv/pull/1397
+.. _ESMValTool: https://github.com/ESMValGroup/ESMValTool
diff --git a/docs/src/whatsnew/3.2.rst b/docs/src/whatsnew/3.2.rst
new file mode 100644
index 0000000000..387cb32a26
--- /dev/null
+++ b/docs/src/whatsnew/3.2.rst
@@ -0,0 +1,405 @@
+.. include:: ../common_links.inc
+
+v3.2 (15 Feb 2022)
+******************
+
+This document explains the changes made to Iris for this release
+(:doc:`View all changes <index>`.)
+
+.. dropdown:: v3.2.0 Release Highlights
+   :color: primary
+   :icon: info
+   :animate: fade-in
+   :open:
+
+   The highlights for this minor release of Iris include:
+
+   * We've added experimental support for
+     :ref:`Meshes <ugrid>`, which can now be loaded and
+     attached to a cube. Mesh support is based on the `CF-UGRID`_ model.
+   * We've also dropped support for ``Python 3.7``.
+
+   And finally, get in touch with us on :issue:`GitHub<new/choose>` if you have
+   any issues or feature requests for improving Iris. Enjoy!
+
+
+v3.2.1 (11 Mar 2022)
+====================
+
+.. dropdown:: v3.2.1 Patches
+   :color: secondary
+   :icon: alert
+   :animate: fade-in
+
+   📢 **Welcome** to `@dennissergeev`_, who made his first contribution to Iris. Nice work!
+
+   The patches in this release of Iris include:
+
+   🐛 **Bugs Fixed**
+
+   #. `@dennissergeev`_ changed ``_crs_distance_differentials()`` so that it uses the
+      ``Globe`` attribute from a given CRS instead of creating a new ``ccrs.Globe()`` object.
+      Iris can now handle non-Earth semi-major axes, as discussed in :issue:`4582` (:pull:`4605`).
+
+   #. `@trexfeathers`_ avoided a dimensionality mismatch when streaming the
+      :attr:`~iris.coords.Coord.bounds` array for a scalar
+      :class:`~iris.coords.Coord`. (:pull:`4610`)
+
+
+📢 Announcements
+================
+
+#. Welcome to `@wjbenfold`_, `@tinyendian`_, `@larsbarring`_, `@bsherratt`_ and
+   `@aaronspring`_ who made their first contributions to Iris. The first of
+   many we hope!
+#. Congratulations to `@wjbenfold`_ who has become a core developer for Iris! 🎉
+
+
+✨ Features
+===========
+
+#. `@bjlittle`_, `@pp-mo`_, `@trexfeathers`_ and `@stephenworsley`_ added
+   support for :ref:`unstructured meshes <ugrid>`. This involved
+   adding a data model (:pull:`3968`, :pull:`4014`, :pull:`4027`, :pull:`4036`,
+   :pull:`4053`, :pull:`4439`) and API (:pull:`4063`, :pull:`4064`), and
+   supporting representation (:pull:`4033`, :pull:`4054`) of data on meshes.
+   Most of this new API can be found in :mod:`iris.experimental.ugrid`. The key
+   objects introduced are :class:`iris.experimental.ugrid.mesh.MeshXY`,
+   :class:`iris.experimental.ugrid.mesh.MeshCoord` and
+   :obj:`iris.experimental.ugrid.load.PARSE_UGRID_ON_LOAD`.
+   A :class:`~iris.experimental.ugrid.mesh.MeshXY` contains a full description of a UGRID
+   type mesh. :class:`~iris.experimental.ugrid.mesh.MeshCoord`\ s are coordinates that
+   reference and represent a :class:`~iris.experimental.ugrid.mesh.MeshXY` for use
+   on a :class:`~iris.cube.Cube`. :class:`~iris.cube.Cube`\ s are also given the
+   property :attr:`~iris.cube.Cube.mesh` which returns a
+   :class:`~iris.experimental.ugrid.mesh.MeshXY` if one is attached to the
+   :class:`~iris.cube.Cube` via a :class:`~iris.experimental.ugrid.mesh.MeshCoord`.
+
+#. `@trexfeathers`_ added support for loading unstructured mesh data from netcdf data,
+   for files using the `CF-UGRID`_ conventions.
+   The context manager :obj:`~iris.experimental.ugrid.load.PARSE_UGRID_ON_LOAD`
+   provides a way to load UGRID files so that :class:`~iris.cube.Cube`\ s can be
+   returned with a :class:`~iris.experimental.ugrid.mesh.MeshXY` attached.
+   (:pull:`4058`)
+
+#. `@pp-mo`_ added support to save cubes with :ref:`meshes <ugrid>` to netcdf
+   files, using the `CF-UGRID`_ conventions.
+   The existing :meth:`iris.save` function now does this, when saving cubes with meshes.
+   A routine :meth:`iris.experimental.ugrid.save.save_mesh` allows saving
+   :class:`~iris.experimental.ugrid.mesh.MeshXY` objects to netcdf *without* any associated data
+   (i.e. not attached to cubes).
+   (:pull:`4318` and :pull:`4339`)
+
+#. `@trexfeathers`_ added :meth:`iris.experimental.ugrid.mesh.MeshXY.from_coords`
+   for inferring a :class:`~iris.experimental.ugrid.mesh.MeshXY` from an
+   appropriate collection of :class:`iris.coords.Coord`\ s.
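+
+   As a minimal sketch (``lon`` and ``lat`` here are hypothetical names,
+   standing for a matching pair of bounded, face-describing coordinates
+   taken from the same cube)::
+
+      from iris.experimental.ugrid.mesh import MeshXY
+
+      mesh = MeshXY.from_coords(lon, lat)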
+
+#. `@larsbarring`_ updated :func:`~iris.util.equalise_attributes` to return a list of dictionaries
+   containing the attributes removed from each :class:`~iris.cube.Cube`. (:pull:`4357`)
+
+#. `@trexfeathers`_ enabled streaming of **all** lazy arrays when saving to
+   NetCDF files (was previously just :class:`~iris.cube.Cube`
+   :attr:`~iris.cube.Cube.data`). This is
+   important given the much greater size of
+   :class:`~iris.coords.AuxCoord` :attr:`~iris.coords.AuxCoord.points` and
+   :class:`~iris.experimental.ugrid.mesh.Connectivity`
+   :attr:`~iris.experimental.ugrid.mesh.Connectivity.indices` under the
+   :ref:`mesh model `. (:pull:`4375`)
+
+#. `@bsherratt`_ added a ``threshold`` parameter to
+   :meth:`~iris.cube.Cube.intersection` (:pull:`4363`)
+
+#. `@wjbenfold`_ added test data to ci benchmarks so that it is accessible to
+   benchmark scripts. Also added a regridding benchmark that uses this data
+   (:pull:`4402`)
+
+#. `@pp-mo`_ updated to the latest CF Standard Names Table ``v78`` (21 Sept 2021).
+   (:issue:`4479`, :pull:`4483`)
+
+#. `@SimonPeatman`_ added support for filenames in the form of a :class:`~pathlib.PurePath`
+   in :func:`~iris.load`, :func:`~iris.load_cube`, :func:`~iris.load_cubes`,
+   :func:`~iris.load_raw` and :func:`~iris.save` (:issue:`3411`, :pull:`3917`).
+   Support for :class:`~pathlib.PurePath` is yet to be implemented across the rest
+   of Iris (:issue:`4523`).
+
+#. `@pp-mo`_ removed broken tooling for deriving Iris metadata translations
+   from ``Metarelate``. From now on we intend to manage phenomenon translation
+   in Iris itself. (:pull:`4484`)
+
+#. `@pp-mo`_ improved printout of various cube data component objects:
+   :class:`~iris.coords.Coord`, :class:`~iris.coords.CellMeasure`,
+   :class:`~iris.coords.AncillaryVariable`,
+   :class:`~iris.experimental.ugrid.mesh.MeshCoord` and
+   :class:`~iris.experimental.ugrid.mesh.MeshXY`.
+   These now all provide a more controllable ``summary()`` method, and
+   more convenient and readable ``str()`` and ``repr()`` output in the style of
+   the :class:`iris.cube.Cube`.
+   They also no longer realise lazy data. (:pull:`4499`)
+
+
+🐛 Bugs Fixed
+=============
+
+#. `@rcomer`_ fixed :meth:`~iris.cube.Cube.intersection` for special cases where
+   one cell's bounds align with the requested maximum and negative minimum, fixing
+   :issue:`4221`. (:pull:`4278`)
+
+#. `@bsherratt`_ fixed further edge cases in
+   :meth:`~iris.cube.Cube.intersection`, including :issue:`3698` (:pull:`4363`)
+
+#. `@tinyendian`_ fixed the error message produced by :meth:`~iris.cube.CubeList.concatenate_cube`
+   when a cube list contains cubes with different names, which will no longer report
+   "Cube names differ: var1 != var1" if var1 appears multiple times in the list
+   (:issue:`4342`, :pull:`4345`)
+
+#. `@larsbarring`_ fixed :class:`~iris.coord_systems.GeogCS` to handle spherical ellipsoid
+   parameter inverse_flattening = 0 (:issue:`4146`, :pull:`4348`)
+
+#. `@pdearnshaw`_ fixed an error in the call to :class:`cftime.datetime` in
+   :mod:`~iris.fileformats.pp_save_rules` that prevented the saving to PP of climate
+   means for DJF (:pull:`4391`)
+
+#. `@wjbenfold`_ improved the error message for failure of :meth:`~iris.cube.CubeList.concatenate`
+   to indicate that the value of a scalar coordinate may be mismatched, rather than the metadata
+   (:issue:`4096`, :pull:`4387`)
+
+#. `@bsherratt`_ fixed a regression to the NAME file loader introduced in 3.0.4,
+   as well as some long-standing bugs with vertical coordinates and number
+   formats. (:pull:`4411`)
+
+#. 
`@rcomer`_ fixed :meth:`~iris.cube.Cube.subset` to always return ``None`` if + no value match is found. (:pull:`4417`) + +#. `@wjbenfold`_ changed :meth:`iris.util.points_step` to stop it from warning + when applied to a single point (:issue:`4250`, :pull:`4367`) + +#. `@trexfeathers`_ changed :class:`~iris.coords._DimensionalMetadata` and + :class:`~iris.experimental.ugrid.Connectivity` equality methods to preserve + array laziness, allowing efficient comparisons even with larger-than-memory + objects. (:pull:`4439`) + +#. `@rcomer`_ modified :meth:`~iris.cube.Cube.aggregated_by` to calculate new + coordinate bounds using minimum and maximum for unordered coordinates, + fixing :issue:`1528`. (:pull:`4315`) + +#. `@wjbenfold`_ changed how a delayed unit conversion is performed on a cube + so that a cube with lazy data awaiting a unit conversion can be pickled. + (:issue:`4354`, :pull:`4377`) + +#. `@pp-mo`_ fixed a bug in netcdf loading, whereby *any* rotated latlon coordinate + was mistakenly interpreted as a latitude, usually resulting in two 'latitude's + instead of one latitude and one longitude. + (:issue:`4460`, :pull:`4470`) + +#. `@wjbenfold`_ stopped :meth:`iris.coord_systems.GeogCS.as_cartopy_projection` + from assuming the globe to be the Earth (:issue:`4408`, :pull:`4497`) + +#. `@rcomer`_ corrected the ``long_name`` mapping from UM stash code ``m01s09i215`` + to indicate cloud fraction greater than 7.9 oktas, rather than 7.5 + (:issue:`3305`, :pull:`4535`) + +#. `@lbdreyer`_ fixed a bug in :class:`iris.io.load_http` which was missing an import + (:pull:`4580`) + + +💣 Incompatible Changes +======================= + +#. N/A + + +🚀 Performance Enhancements +=========================== + +#. `@wjbenfold`_ resolved an issue that previously caused regridding with lazy + data to take significantly longer than with real data. Benchmark + :class:`benchmarks.HorizontalChunkedRegridding` shows a time decrease + from >10s to 625ms. (:issue:`4280`, :pull:`4400`) + +#. `@bjlittle`_ included an optimisation to :class:`~iris.cube.Cube.coord_dims` + to avoid unnecessary processing whenever a coordinate instance that already + exists within the cube is provided. (:pull:`4549`) + + +🔥 Deprecations +=============== + +#. `@wjbenfold`_ removed :mod:`iris.experimental.equalise_cubes`. In ``v3.0`` + the experimental ``equalise_attributes`` functionality was moved to the + :mod:`iris.util.equalise_attributes` function. Since then, calling the + :func:`iris.experimental.equalise_cubes.equalise_attributes` function raised + an exception. (:issue:`3528`, :pull:`4496`) + +#. `@wjbenfold`_ deprecated :func:`iris.util.approx_equal` in preference for + :func:`math.isclose`. The :func:`~iris.util.approx_equal` function will be + removed in a future release of Iris. (:pull:`4514`) + +#. `@wjbenfold`_ deprecated :mod:`iris.experimental.raster` as it is not + believed to still be in use. The deprecation warnings invite users to contact + the Iris Developers if this isn't the case. (:pull:`4525`) + +#. `@wjbenfold`_ deprecated :mod:`iris.fileformats.abf` and + :mod:`iris.fileformats.dot` as they are not believed to still be in use. The + deprecation warnings invite users to contact the Iris Developers if this + isn't the case. (:pull:`4515`) + +#. `@wjbenfold`_ removed the :func:`iris.util.as_compatible_shape` function, + which was deprecated in ``v3.0``. Instead use + :class:`iris.common.resolve.Resolve`. 
For example, rather than calling + ``as_compatible_shape(src_cube, target_cube)`` replace with + ``Resolve(src_cube, target_cube)(target_cube.core_data())``. (:pull:`4513`) + +#. `@wjbenfold`_ deprecated :func:`iris.analysis.maths.intersection_of_cubes` in + preference for :meth:`iris.cube.CubeList.extract_overlapping`. The + :func:`~iris.analysis.maths.intersection_of_cubes` function will be removed in + a future release of Iris. (:pull:`4541`) + +#. `@pp-mo`_ deprecated :mod:`iris.experimental.regrid_conservative`. This is + now replaced by `iris-emsf-regrid`_. (:pull:`4551`) + +#. `@pp-mo`_ deprecated everything in :mod:`iris.experimental.regrid`. + Most features have a preferred exact alternative, as suggested, *except* + :class:`iris.experimental.regrid.ProjectedUnstructuredLinear` : that has no + identical equivalent, but :class:`iris.analysis.UnstructuredNearest` is + suggested as being quite close (though possibly slower). (:pull:`4548`) + + +🔗 Dependencies +=============== + +#. `@bjlittle`_ introduced the ``cartopy >=0.20`` minimum pin. + (:pull:`4331`) + +#. `@trexfeathers`_ introduced the ``cf-units >=3`` and ``nc-time-axis >=1.3`` + minimum pins. (:pull:`4356`) + +#. `@bjlittle`_ introduced the ``numpy >=1.19`` minimum pin, in + accordance with `NEP-29`_ deprecation policy. (:pull:`4386`) + +#. `@bjlittle`_ dropped support for ``Python 3.7``, as per the `NEP-29`_ + backwards compatibility and deprecation policy schedule. (:pull:`4481`) + + +📚 Documentation +================ + +#. `@rcomer`_ updated the "Plotting Wind Direction Using Quiver" Gallery + example. (:pull:`4120`) + +#. `@trexfeathers`_ included Iris `GitHub Discussions`_ in + :ref:`get involved `. (:pull:`4307`) + +#. `@wjbenfold`_ improved readability in :ref:`userguide interpolation + section `. (:pull:`4314`) + +#. `@wjbenfold`_ added explanation about the absence of | operator for + :class:`iris.Constraint` to :ref:`userguide loading section + ` and to api reference documentation. (:pull:`4321`) + +#. `@trexfeathers`_ added more detail on making `iris-test-data`_ available + during :ref:`developer_running_tests`. (:pull:`4359`) + +#. `@lbdreyer`_ added a section to the release documentation outlining the role + of the :ref:`release_manager`. (:pull:`4413`) + +#. `@trexfeathers`_ encouraged contributors to include type hinting in code + they are working on - :ref:`code_formatting`. (:pull:`4390`) + +#. `@wjbenfold`_ updated Cartopy documentation links to point to the renamed + :class:`cartopy.mpl.geoaxes.GeoAxes`. (:pull:`4464`) + +#. `@wjbenfold`_ clarified behaviour of :func:`iris.load` in :ref:`userguide + loading section `. (:pull:`4462`) + +#. `@bjlittle`_ migrated readthedocs to use mambaforge for `faster documentation building`_. + (:pull:`4476`) + +#. `@wjbenfold`_ contributed `@alastair-gemmell`_'s :ref:`step-by-step guide to + contributing to the docs ` to the docs. + (:pull:`4461`) + +#. `@pp-mo`_ improved and corrected docstrings of + :class:`iris.analysis.PointInCell`, making it clear what is the actual + calculation performed. (:pull:`4548`) + +#. `@pp-mo`_ removed reference in docstring of + :class:`iris.analysis.UnstructuredNearest` to the obsolete (deprecated) + :class:`iris.experimental.regrid.ProjectedUnstructuredNearest`. + (:pull:`4548`) + + +💼 Internal +=========== + +#. `@trexfeathers`_ set the linkcheck to ignore + https://www.nationalarchives.gov.uk/doc/open-government-licence since this + always works locally, but never within CI. (:pull:`4307`) + +#. 
`@wjbenfold`_ netCDF integration tests now skip ``TestConstrainedLoad`` if + test data is missing (:pull:`4319`) + +#. `@wjbenfold`_ excluded ``Good First Issue`` labelled issues from being + marked stale. (:pull:`4317`) + +#. `@tkknight`_ added additional make targets for reducing the time of the + documentation build including ``html-noapi`` and ``html-quick``. + Useful for development purposes only. For more information see + :ref:`contributing.documentation.building` the documentation. (:pull:`4333`) + +#. `@rcomer`_ modified the ``animation`` test to prevent it throwing a warning + that sometimes interferes with unrelated tests. (:pull:`4330`) + +#. `@rcomer`_ removed a now redundant workaround in :func:`~iris.plot.contourf`. + (:pull:`4349`) + +#. `@trexfeathers`_ refactored :mod:`iris.experimental.ugrid` into sub-modules. + (:pull:`4347`). + +#. `@bjlittle`_ enabled the `sort-all`_ `pre-commit`_ hook to automatically + sort ``__all__`` entries into alphabetical order. (:pull:`4353`) + +#. `@rcomer`_ modified a NetCDF saver test to prevent it triggering a numpy + deprecation warning. (:issue:`4374`, :pull:`4376`) + +#. `@akuhnregnier`_ removed addition of period from + :func:`~iris.analysis.cartography.wrap_lons` and updated affected tests + using ``assertArrayAllClose`` following :issue:`3993`. + (:pull:`4421`) + +#. `@rcomer`_ updated some tests to work with Matplotlib v3.5. (:pull:`4428`) + +#. `@rcomer`_ applied minor fixes to some regridding tests. (:pull:`4432`) + +#. `@lbdreyer`_ corrected the license PyPI classifier. (:pull:`4435`) + +#. `@aaronspring`_ exchanged ``dask`` with + ``dask-core`` in testing environments reducing the number of dependencies + installed for testing. (:pull:`4434`) + +#. `@wjbenfold`_ prevented github action runs in forks (:issue:`4441`, + :pull:`4444`) + +#. `@wjbenfold`_ fixed tests for hybrid formulae that weren't being found by + nose (:issue:`4431`, :pull:`4450`) + +.. comment + Whatsnew author names (@github name) in alphabetical order. Note that, + core dev names are automatically included by the common_links.inc: + +.. _@aaronspring: https://github.com/aaronspring +.. _@akuhnregnier: https://github.com/akuhnregnier +.. _@bsherratt: https://github.com/bsherratt +.. _@dennissergeev: https://github.com/dennissergeev +.. _@larsbarring: https://github.com/larsbarring +.. _@pdearnshaw: https://github.com/pdearnshaw +.. _@SimonPeatman: https://github.com/SimonPeatman +.. _@tinyendian: https://github.com/tinyendian + +.. comment + Whatsnew resources in alphabetical order: + +.. _NEP-29: https://numpy.org/neps/nep-0029-deprecation_policy.html +.. _UGRID: https://ugrid-conventions.github.io/ugrid-conventions/ +.. _iris-emsf-regrid: https://github.com/SciTools-incubator/iris-esmf-regrid +.. _faster documentation building: https://docs.readthedocs.io/en/stable/guides/conda.html#making-builds-faster-with-mamba +.. _sort-all: https://github.com/aio-libs/sort-all diff --git a/docs/src/whatsnew/3.3.rst b/docs/src/whatsnew/3.3.rst new file mode 100644 index 0000000000..4ab5a2e973 --- /dev/null +++ b/docs/src/whatsnew/3.3.rst @@ -0,0 +1,370 @@ +.. include:: ../common_links.inc + +v3.3 (1 Sep 2022) +***************** + +This document explains the changes made to Iris for this release +(:doc:`View all changes `.) + +.. 
dropdown:: |iris_version| v3.3.0 Release Highlights
+   :color: primary
+   :icon: info
+   :animate: fade-in
+   :open:
+
+   The highlights for this minor release of Iris include:
+
+   * We've added support for datums, loading them from NetCDF when the
+     :obj:`iris.FUTURE.datum_support` flag is set.
+   * We've greatly improved the speed of linear interpolation.
+   * We've added the function :func:`iris.pandas.as_cubes` for richer
+     conversion from Pandas.
+   * We've improved the functionality of :func:`iris.util.mask_cube`.
+   * We've improved the functionality and performance of the
+     :obj:`iris.analysis.PERCENTILE` aggregator.
+   * We've completed implementation of our :ref:`contributing.benchmarks`
+     infrastructure.
+
+   And finally, get in touch with us on :issue:`GitHub<new/choose>` if you have
+   any issues or feature requests for improving Iris. Enjoy!
+
+
+v3.3.1 (29 Sep 2022)
+====================
+
+.. dropdown:: v3.3.1 Patches
+   :color: secondary
+   :icon: alert
+   :animate: fade-in
+
+   The patches in this release of Iris include:
+
+   #. `@pp-mo`_ fixed the Jupyter notebook display of :class:`~iris.cube.CubeList`.
+      (:issue:`4973`, :pull:`4976`)
+
+   #. `@pp-mo`_ fixed a bug in NAME loaders where data with no associated statistic would
+      load as a cube with invalid cell-methods, which cannot be printed or saved to netcdf.
+      (:issue:`3288`, :pull:`4933`)
+
+   #. `@pp-mo`_ ensured that :data:`iris.cube.Cube.cell_methods` must always be an iterable
+      of :class:`iris.coords.CellMethod` objects (:pull:`4933`)
+
+   #. `@trexfeathers`_ advanced the Cartopy pin to ``>=0.21``, as Cartopy's
+      change to default Transverse Mercator projection affects an Iris test.
+      See `SciTools/cartopy@fcb784d`_ and `SciTools/cartopy@8860a81`_ for more
+      details. (:pull:`4992`)
+
+   #. `@trexfeathers`_ introduced the ``netcdf4!=1.6.1`` pin to avoid a
+      problem with segfaults. (:pull:`4992`)
+
+
+📢 Announcements
+================
+
+#. Welcome to `@krikru`_ who made their first contribution to Iris 🎉
+
+
+✨ Features
+===========
+
+#. `@schlunma`_ added weighted aggregation over "group coordinates":
+   :meth:`~iris.cube.Cube.aggregated_by` now accepts the keyword ``weights`` if a
+   :class:`~iris.analysis.WeightedAggregator` is used. (:issue:`4581`,
+   :pull:`4589`)
+
+#. `@wjbenfold`_ added support for ``false_easting`` and ``false_northing`` to
+   :class:`~iris.coord_systems.Mercator`. (:issue:`3107`, :pull:`4524`)
+
+#. `@rcomer`_ and `@wjbenfold`_ (reviewer) implemented lazy aggregation for the
+   :obj:`iris.analysis.PERCENTILE` aggregator. (:pull:`3901`)
+
+#. `@pp-mo`_ fixed cube arithmetic operation for cubes with meshes.
+   (:issue:`4454`, :pull:`4651`)
+
+#. `@wjbenfold`_ added support for CF-compliant treatment of
+   ``standard_parallel`` and ``scale_factor_at_projection_origin`` to
+   :class:`~iris.coord_systems.Mercator`. (:issue:`3844`, :pull:`4609`)
+
+#. `@wjbenfold`_ added support for datums associated with coordinate systems (e.g.
+   :class:`~iris.coord_systems.GeogCS` and other subclasses of
+   :class:`~iris.coord_systems.CoordSystem`). Loading of datum information from
+   a netCDF file only happens when the :obj:`iris.FUTURE.datum_support` flag is
+   set. (:issue:`4619`, :pull:`4704`)
+
+#. `@wjbenfold`_ and `@stephenworsley`_ (reviewer) added a maximum run length
+   aggregator (:class:`~iris.analysis.MAX_RUN`). (:pull:`4676`)
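+
+   A brief sketch of the idea (this assumes the aggregator accepts a
+   ``function`` keyword with which to turn the data into True/False runs;
+   the threshold value is arbitrary)::
+
+      import iris.analysis
+
+      # longest consecutive run of time points exceeding the threshold
+      result = cube.collapsed(
+          "time", iris.analysis.MAX_RUN, function=lambda data: data > 273.15
+      )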
+
+#. `@wjbenfold`_ and `@rcomer`_ (reviewer) added a ``climatological`` keyword to
+   :meth:`~iris.cube.Cube.aggregated_by` that causes the climatological flag to
+   be set and the point for each cell to equal its first bound, thereby
+   preserving the time of year. (:issue:`1422`, :issue:`4098`, :issue:`4665`,
+   :pull:`4723`)
+
+#. `@wjbenfold`_ and `@pp-mo`_ (reviewer) implemented the
+   :class:`~iris.coord_systems.PolarStereographic` CRS. (:issue:`4770`,
+   :pull:`4773`)
+
+#. `@rcomer`_ and `@wjbenfold`_ (reviewer) enabled passing of the
+   :func:`numpy.percentile` keywords through the :obj:`~iris.analysis.PERCENTILE`
+   aggregator. (:pull:`4791`)
+
+#. `@wjbenfold`_ and `@bjlittle`_ (reviewer) implemented
+   :func:`iris.plot.fill_between` and :func:`iris.quickplot.fill_between`.
+   (:issue:`3493`, :pull:`4647`)
+
+#. `@rcomer`_ and `@bjlittle`_ (reviewer) re-wrote :func:`iris.util.mask_cube`
+   to provide lazy evaluation and greater flexibility with respect to input types.
+   (:issue:`3936`, :pull:`4889`)
+
+#. `@stephenworsley`_ and `@lbdreyer`_ added a new kwarg ``expand_extras`` to
+   :func:`iris.util.new_axis` which can be used to specify instances of
+   :class:`~iris.coords.AuxCoord`, :class:`~iris.coords.CellMeasure` and
+   :class:`~iris.coords.AncillaryVariable` which should also be expanded to map
+   to the new axis. (:pull:`4896`)
+
+#. `@stephenworsley`_ updated to the latest CF Standard Names Table ``v79``
+   (19 March 2022). (:pull:`4910`)
+
+#. `@trexfeathers`_ and `@lbdreyer`_ (reviewer) added
+   :func:`iris.pandas.as_cubes`, which provides richer conversion from
+   Pandas :class:`~pandas.Series` / :class:`~pandas.DataFrame`\ s to one or more
+   :class:`~iris.cube.Cube`\ s. This includes: n-dimensional datasets,
+   :class:`~iris.coords.AuxCoord`\ s, :class:`~iris.coords.CellMeasure`\ s,
+   :class:`~iris.coords.AncillaryVariable`\ s, and multi-dimensional
+   coordinates. (:pull:`4890`)
+
+
+🐛 Bugs Fixed
+=============
+
+#. `@rcomer`_ reverted part of the change from :pull:`3906` so that
+   :func:`iris.plot.plot` no longer defaults to placing a "Y" coordinate (e.g.
+   latitude) on the y-axis of the plot. (:issue:`4493`, :pull:`4601`)
+
+#. `@rcomer`_ enabled passing of scalar objects to :func:`~iris.plot.plot` and
+   :func:`~iris.plot.scatter`. (:pull:`4616`)
+
+#. `@rcomer`_ fixed :meth:`~iris.cube.Cube.aggregated_by` with ``mdtol`` for 1D
+   cubes where an aggregated section is entirely masked, reported at
+   :issue:`3190`. (:pull:`4246`)
+
+#. `@rcomer`_ ensured that a :class:`matplotlib.axes.Axes`'s position is preserved
+   when Iris replaces it with a :class:`cartopy.mpl.geoaxes.GeoAxes`, fixing
+   :issue:`1157`. (:pull:`4273`)
+
+#. `@rcomer`_ fixed :meth:`~iris.coords.Coord.nearest_neighbour_index` for edge
+   cases where the requested point is float and the coordinate has integer
+   bounds, reported at :issue:`2969`. (:pull:`4245`)
+
+#. `@rcomer`_ modified bounds setting on :obj:`~iris.coords.DimCoord` instances
+   so that the order of the cell bounds is automatically reversed
+   to match the coordinate's direction if necessary. This is consistent with
+   the "Bounds for 1-D coordinate variables" subsection of the `Cell Boundaries`_
+   section of the CF Conventions and ensures that contiguity is preserved if a
+   coordinate's direction is reversed. (:issue:`3249`, :issue:`423`,
+   :issue:`4078`, :issue:`3756`, :pull:`4466`)
+
+#. 
`@wjbenfold`_ and `@evertrol`_ prevented an ``AttributeError`` being logged + to ``stderr`` when a :class:`~iris.fileformats.cf.CFReader` that fails to + initialise is garbage collected. (:issue:`3312`, :pull:`4646`) + +#. `@wjbenfold`_ fixed plotting of circular coordinates to extend kwarg arrays + as well as the data. (:issue:`466`, :pull:`4649`) + +#. `@wjbenfold`_ and `@rcomer`_ (reviewer) corrected the axis on which masking + is applied when an aggregator adds a trailing dimension. (:pull:`4755`) + +#. `@rcomer`_ and `@pp-mo`_ ensured that all methods to create or modify a + :class:`iris.cube.CubeList` check that it only contains cubes. According to + code comments, this was supposedly already the case, but there were several bugs + and loopholes. (:issue:`1897`, :pull:`4767`) + +#. `@rcomer`_ modified cube arithmetic to handle mismatches in the cube's data + array type. This prevents masks being lost in some cases and therefore + resolves :issue:`2987`. (:pull:`3790`) + +#. `@krikru`_ and `@rcomer`_ updated :mod:`iris.quickplot` such that the + colorbar is added to the correct ``axes`` when specified as a keyword + argument to a plotting routine. Otherwise, by default the colorbar will be + added to the current axes of the current figure. (:pull:`4894`) + +#. `@rcomer`_ and `@bjlittle`_ (reviewer) modified :func:`iris.util.mask_cube` so it + either works in place or returns a new cube (:issue:`3717`, :pull:`4889`) + + +💣 Incompatible Changes +======================= + +#. `@rcomer`_ and `@bjlittle`_ (reviewer) updated Iris's calendar handling to be + consistent with ``cf-units`` version 3.1. In line with the `Calendar`_ + section in version 1.9 of the CF Conventions, we now use "standard" rather + than the deprecated "gregorian" label for the default calendar. Units may + still be instantiated with ``calendar="gregorian"`` but their calendar + attribute will be silently changed to "standard". This may cause failures in + code that explicitly checks the calendar attribute. (:pull:`4847`) + + +🚀 Performance +============== + +#. `@wjbenfold`_ added caching to the calculation of the points array in a + :class:`~iris.coords.DimCoord` created using + :meth:`~iris.coords.DimCoord.from_regular`. (:pull:`4698`) + +#. `@wjbenfold`_ introduced caching in :func:`_lazy_data._optimum_chunksize` and + :func:`iris.fileformats.pp_load_rules._epoch_date_hours` to reduce time spent + repeating calculations. (:pull:`4716`) + +#. `@pp-mo`_ made :meth:`~iris.cube.Cube.add_aux_factory` faster. + (:pull:`4718`) + +#. `@wjbenfold`_ and `@rcomer`_ (reviewer) permitted the fast percentile + aggregation method to be used on masked data when the missing data tolerance + is set to 0. (:issue:`4735`, :pull:`4755`) + +#. `@wjbenfold`_ improved the speed of linear interpolation using + :meth:`iris.analysis.trajectory.interpolate` (:pull:`4366`) + +#. NumPy ``v1.23`` behaviour changes mean that + :func:`iris.experimental.ugrid.utils.recombine_submeshes` now uses ~3x as + much memory; testing shows a ~16-million point mesh will now use ~600MB. + Investigated by `@pp-mo`_ and `@trexfeathers`_. (:issue:`4845`) + + +🔥 Deprecations +=============== + +#. `@trexfeathers`_ and `@lbdreyer`_ (reviewer) deprecated + :func:`iris.pandas.as_cube` in favour of the new + :func:`iris.pandas.as_cubes` - see `✨ Features`_ for more details. + (:pull:`4890`) + + +🔗 Dependencies +=============== + +#. 
`@rcomer`_ introduced the ``nc-time-axis >=1.4`` minimum pin, reflecting that + we no longer use the deprecated :class:`nc_time_axis.CalendarDateTime` + when plotting against time coordinates. (:pull:`4584`) + +#. `@wjbenfold`_ and `@bjlittle`_ (reviewer) unpinned ``pillow``. (:pull:`4826`) + +#. `@rcomer`_ introduced the ``cf-units >=3.1`` minimum pin, reflecting the + alignment of calendar behaviour in the two packages (see Incompatible Changes). + (:pull:`4847`) + +#. `@bjlittle`_ introduced the ``sphinx-gallery >=0.11.0`` minimum pin. + (:pull:`4885`) + +#. `@trexfeathers`_ updated the install process to work with setuptools + ``>=v64``, making ``v64`` the minimum compatible version. (:pull:`4903`) + +#. `@stephenworsley`_ and `@trexfeathers`_ introduced the ``shapely !=1.8.3`` + pin, avoiding a bug caused by its interaction with cartopy. + (:pull:`4911`, :pull:`4917`) + + +📚 Documentation +================ + +#. `@tkknight`_ added a page to show the issues that have been voted for. See + :ref:`voted_issues_top`. (:issue:`3307`, :pull:`4617`) + +#. `@wjbenfold`_ added a note about fixing proxy URLs in lockfiles generated + because dependencies have changed. (:pull:`4666`) + +#. `@lbdreyer`_ moved most of the User Guide's :class:`iris.Constraint` examples + from :ref:`loading_iris_cubes` to :ref:`cube_extraction` and added an + example of constraining on bounded time. (:pull:`4656`) + +#. `@tkknight`_ adopted the `PyData Sphinx Theme`_ for the documentation. + (:discussion:`4344`, :pull:`4661`) + +#. `@tkknight`_ updated our developers guidance to show our intent to adopt + numpydoc strings and fixed some API documentation rendering. + See :ref:`docstrings`. (:issue:`4657`, :pull:`4689`) + +#. `@trexfeathers`_ and `@lbdreyer`_ added a page with examples of converting + various mesh formats into the Iris Mesh Data Model. (:pull:`4739`) + +#. `@rcomer`_ updated the "Load a Time Series of Data From the NEMO Model" + gallery example. (:pull:`4741`) + +#. `@wjbenfold`_ added developer documentation to highlight some of the + utilities offered by :class:`iris.IrisTest` and how to update CML and other + output files. (:issue:`4544`, :pull:`4600`) + +#. `@trexfeathers`_ and `@abooton`_ modernised the Iris logo to be SVG format. + (:pull:`3935`) + + +💼 Internal +=========== + +#. `@trexfeathers`_ and `@pp-mo`_ finished implementing a mature benchmarking + infrastructure (see :ref:`contributing.benchmarks`), building on 2 hard + years of lessons learned 🎉. (:pull:`4477`, :pull:`4562`, :pull:`4571`, + :pull:`4583`, :pull:`4621`) + +#. `@wjbenfold`_ used the aforementioned benchmarking infrastructure to + introduce deep (large 3rd dimension) loading and realisation benchmarks. + (:pull:`4654`) + +#. `@wjbenfold`_ made :func:`iris.tests.stock.simple_1d` respect the + ``with_bounds`` argument. (:pull:`4658`) + +#. `@lbdreyer`_ replaced `nose`_ with `pytest`_ as Iris' test runner. + (:pull:`4734`) + +#. `@bjlittle`_ and `@trexfeathers`_ (reviewer) migrated to GitHub Actions + for Continuous-Integration. (:pull:`4503`) + +#. `@pp-mo`_ made tests run certain linux executables from the Python env, + specifically ncdump and ncgen. These could otherwise fail when run in IDEs + such as PyCharm and Eclipse, which don't automatically include the Python env + bin in the system PATH. + (:pull:`4794`) + +#. `@trexfeathers`_ and `@pp-mo`_ improved generation of stock NetCDF files. + (:pull:`4827`, :pull:`4836`) + +#. `@rcomer`_ removed some now redundant testing functions. (:pull:`4838`, + :pull:`4878`) + +#. 
`@bjlittle`_ and `@jamesp`_ (reviewer) and `@lbdreyer`_ (reviewer) extended
+   the GitHub Continuous-Integration to cover testing on ``py38``, ``py39``,
+   and ``py310``. (:pull:`4840`)
+
+#. `@bjlittle`_ and `@trexfeathers`_ (reviewer) adopted `setuptools-scm`_ for
+   automated ``iris`` package versioning. (:pull:`4841`)
+
+#. `@bjlittle`_ and `@trexfeathers`_ (reviewer) added building, testing and
+   publishing of ``iris`` PyPI ``sdist`` and binary ``wheels`` as part of
+   our GitHub Continuous-Integration. (:pull:`4849`)
+
+#. `@rcomer`_ and `@wjbenfold`_ (reviewer) used ``pytest`` parametrization to
+   streamline the gallery test code. (:pull:`4792`)
+
+#. `@trexfeathers`_ improved settings to work better with
+   ``setuptools_scm``. (:pull:`4925`)
+
+
+.. comment
+    Whatsnew author names (@github name) in alphabetical order. Note that,
+    core dev names are automatically included by the common_links.inc:
+
+.. _@evertrol: https://github.com/evertrol
+.. _@krikru: https://github.com/krikru
+
+
+.. comment
+    Whatsnew resources in alphabetical order:
+
+.. _Calendar: https://cfconventions.org/Data/cf-conventions/cf-conventions-1.9/cf-conventions.html#calendar
+.. _Cell Boundaries: https://cfconventions.org/Data/cf-conventions/cf-conventions-1.9/cf-conventions.html#cell-boundaries
+.. _nose: https://nose.readthedocs.io
+.. _PyData Sphinx Theme: https://pydata-sphinx-theme.readthedocs.io/en/stable/index.html
+.. _pytest: https://docs.pytest.org
+.. _setuptools-scm: https://github.com/pypa/setuptools_scm
+.. _SciTools/cartopy@fcb784d: https://github.com/SciTools/cartopy/commit/fcb784daa65d95ed9a74b02ca292801c02bc4108
+.. _SciTools/cartopy@8860a81: https://github.com/SciTools/cartopy/commit/8860a8186d4dc62478e74c83f3b2b3e8f791372e
diff --git a/docs/src/whatsnew/3.4.rst b/docs/src/whatsnew/3.4.rst
new file mode 100644
index 0000000000..e8d4f0fd2b
--- /dev/null
+++ b/docs/src/whatsnew/3.4.rst
@@ -0,0 +1,301 @@
+.. include:: ../common_links.inc
+
+v3.4 (01 Dec 2022)
+******************
+
+This document explains the changes made to Iris for this release
+(:doc:`View all changes <index>`.)
+
+
+.. dropdown:: v3.4.0 Release Highlights
+   :color: primary
+   :icon: info
+   :animate: fade-in
+   :open:
+
+   The highlights for this minor release of Iris include:
+
+   * We have **archived older Iris documentation** - everything before
+     ``v3.0.0`` - so older versions will soon no longer appear in search
+     engines. If you need this older documentation: please
+     see :ref:`iris_support`.
+   * We have added a :ref:`glossary` to the Iris documentation.
+   * We have completed work to make **Pandas interoperability** handle
+     n-dimensional :class:`~iris.cube.Cube`\ s.
+   * We have **begun refactoring Iris' regridding**, which has already improved
+     performance and functionality, with more potential in future!
+   * We have made several other significant `🚀 Performance Enhancements`_.
+
+   And finally, get in touch with us on :issue:`GitHub<new/choose>` if you have
+   any issues or feature requests for improving Iris. Enjoy!
+
+
+
+v3.4.1 (21 Feb 2023)
+====================
+
+.. dropdown:: v3.4.1 Patches
+   :color: secondary
+   :icon: alert
+   :animate: fade-in
+
+   The patches in this release of Iris include:
+
+   #. `@trexfeathers`_ and `@pp-mo`_ made Iris' use of the `netCDF4`_ library
+      thread-safe. (:pull:`5095`)
+
+   #. `@trexfeathers`_ and `@pp-mo`_ removed the netCDF4 pin mentioned in
+      `🔗 Dependencies`_ point 3. (:pull:`5095`)
+
+
+📢 Announcements
+================
+
Welcome to `@ESadek-MO`_, `@TTV-Intrepid`_ and `@hsteptoe`_, who made their + first contributions to Iris 🎉 + + .. _try_experimental_stratify: + +#. Do you enjoy `python-stratify`_? Did you know that Iris includes a + convenience for using `python-stratify`_ with :class:`~iris.cube.Cube`\s? + It has been 'experimental' for several years now, without receiving much + feedback, so it's **use it or lose it** time: please try out + :mod:`iris.experimental.stratify` and let us know what you think! + + +✨ Features +=========== + +#. `@ESadek-MO`_ edited :func:`~iris.io.expand_filespecs` to allow expansion of + non-existing paths, and added expansion functionality to :func:`~iris.io.save`. + (:issue:`4772`, :pull:`4913`) + +#. `@trexfeathers`_ and `Julian Heming`_ added new mappings between CF + standard names and UK Met Office LBFC codes. (:pull:`4859`) + +#. `@pp-mo`_ changed the metadata of a face/edge-type + :class:`~iris.experimental.ugrid.mesh.MeshCoord`, to be the same as the face/edge + coordinate in the mesh from which it takes its ``.points``. Previously, all MeshCoords + took their metadata from the node coord, but only a node-type MeshCoord now does + that. Also, the MeshCoord ``.var_name`` is now that of the underlying coord, whereas + previously this was always None. These changes make MeshCoord more like an ordinary + :class:`~iris.coords.AuxCoord`, which avoids some specific known usage problems. + (:issue:`4860`, :pull:`5020`) + +#. `@Esadek-MO`_ and `@trexfeathers`_ added dim coord + prioritisation to ``_get_lon_lat_coords()`` in :mod:`iris.analysis.cartography`. + This allows :func:`iris.analysis.cartography.area_weights` and + :func:`~iris.analysis.cartography.project` to handle cubes which contain + both dim and aux coords of the same type e.g. ``longitude`` and ``grid_longitude``. + (:issue:`3916`, :pull:`5029`). + +#. `@stephenworsley`_ added the ability to regrid derived coordinates with the + :obj:`~iris.analysis.PointInCell` regridding scheme. (:pull:`4807`) + +#. `@trexfeathers`_ made NetCDF loading more tolerant by enabling skipping of + :class:`~iris.coords.DimCoord`\s, :class:`~iris.coords.AuxCoord`\s, + :class:`~iris.coords.CellMeasure`\s and + :class:`~iris.coords.AncillaryVariable`\s if they cannot be added to a + :class:`~iris.cube.Cube` (e.g. due to CF non-compliance). This is done via + a new error class: :class:`~iris.exceptions.CannotAddError` (subclass of + :class:`ValueError`). (:pull:`5054`) + +#. `@pp-mo`_ implemented == and != comparisons for :class:`~iris.Constraint`\s. + A simple constraint is now == to another one constructed in the same way. + However, equality is limited for more complex cases: value-matching functions must + be the identical function object, and for &-combinations order is significant, + i.e. ``(c1 & c2) != (c2 & c1)``. + (:issue:`3616`, :pull:`3749`). + +#. `@hsteptoe`_ and `@trexfeathers`_ improved + :func:`iris.pandas.as_data_frame`\'s conversion of :class:`~iris.cube.Cube`\s to + :class:`~pandas.DataFrame`\s. This includes better handling of multiple + :class:`~iris.cube.Cube` dimensions, auxiliary coordinates and attribute + information. **Note:** the improvements are opt-in, by setting the + :obj:`iris.FUTURE.pandas_ndim` flag (see :class:`iris.Future` for more). + (:issue:`4526`, :pull:`4909`, :pull:`4669`, :pull:`5059`, :pull:`5074`)
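 + + A minimal sketch of the opt-in behaviour (the filename is hypothetical, and the + exact ``DataFrame`` layout will depend on your cube):: + + import iris + import iris.pandas + + iris.FUTURE.pandas_ndim = True  # opt in to the n-dimensional conversion + cube = iris.load_cube("multidim.nc")  # hypothetical multi-dimensional cube + df = iris.pandas.as_data_frame(cube)  # one row per cube cell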
 + + +🐛 Bugs Fixed +============= + +#. `@rcomer`_ and `@pp-mo`_ (reviewer) factored masking into the returned + sum-of-weights calculation from :obj:`~iris.analysis.SUM`. (:pull:`4905`) + +#. `@schlunma`_ fixed a bug which prevented using + :meth:`iris.cube.Cube.collapsed` on coordinates whose number of bounds + differs from 0 or 2. This enables the use of this method on mesh + coordinates. (:issue:`4672`, :pull:`4870`) + +#. `@bjlittle`_ and `@lbdreyer`_ (reviewer) fixed the building of the CF + Standard Names module ``iris.std_names`` for the ``setup.py`` commands + ``develop`` and ``std_names``. (:issue:`4951`, :pull:`4952`) + +#. `@lbdreyer`_ and `@pp-mo`_ (reviewer) fixed the cube printout such that + scalar ancillary variables are displayed in a dedicated section rather than + being added to the vector ancillary variables section. Further, ancillary + variables and cell measures that map to a cube dimension of length 1 are now + included in the respective vector sections. (:pull:`4945`) + +#. `@rcomer`_ removed some old redundant code that prevented determining the + order of time cells. (:issue:`4697`, :pull:`4729`) + +#. `@stephenworsley`_ improved the accuracy of the error messages for + :meth:`~iris.cube.Cube.coord` when failing to find coordinates in the case where + a coordinate is given as the argument. Similarly, improved the error messages for + :meth:`~iris.cube.Cube.cell_measure` and :meth:`~iris.cube.Cube.ancillary_variable`. + (:issue:`4898`, :pull:`4928`) + +#. `@stephenworsley`_ fixed a bug which caused derived coordinates to be realised + after calling :meth:`iris.cube.Cube.aggregated_by`. (:issue:`3637`, :pull:`4947`) + +#. `@rcomer`_ corrected the ``standard_name`` mapping from UM stash code ``m01s30i311`` + to indicate that this is the upward, rather than northward part of the flow. + (:pull:`5060`) + +#. `@bjlittle`_ and `@trexfeathers`_ (reviewer) fixed an issue which prevented + uncompressed PP fields with additional trailing padded words in the field + data from being loaded and saved. (:pull:`5058`) + +#. `@lbdreyer`_ and `@trexfeathers`_ (reviewer) fixed the handling of data when + regridding with :class:`~iris.analysis.UnstructuredNearest` or calling + :func:`~iris.analysis.trajectory.interpolate` such that the data type and mask are + preserved. (:issue:`4463`, :pull:`5062`) + + +💣 Incompatible Changes +======================= + +#. `@trexfeathers`_ altered testing to accept new Dask copying behaviour from + `dask/dask#9555`_ - copies of a Dask array created using ``da.from_array()`` + will all ``compute()`` to a shared identical array. So creating a + :class:`~iris.cube.Cube` using ``Cube(data=da.from_array(...``, then + using :class:`~iris.cube.Cube` :meth:`~iris.cube.Cube.copy`, + will produce two :class:`~iris.cube.Cube`\s that both return an identical + array when requesting :class:`~iris.cube.Cube` :attr:`~iris.cube.Cube.data`. + We do not expect this to affect typical user workflows but please get in + touch if you need help. (:pull:`5041`) + +#. `@trexfeathers`_ moved ``iris.experimental.animate.animate()`` to + :func:`iris.plot.animate`, in recognition of its successful use over several + years since introduction. (:pull:`5056`) + + +🚀 Performance Enhancements +=========================== + +#. `@rcomer`_ and `@pp-mo`_ (reviewer) increased aggregation speed for + :obj:`~iris.analysis.SUM`, :obj:`~iris.analysis.COUNT` and + :obj:`~iris.analysis.PROPORTION` on real data. (:pull:`4905`) + +#. `@bouweandela`_ made :meth:`iris.coords.Coord.cells` faster for time + coordinates. This also affects :meth:`iris.cube.Cube.extract`, + :meth:`iris.cube.Cube.subset`, and :meth:`iris.coords.Coord.intersect`. + (:pull:`4969`) + +#. 
`@bouweandela`_ improved the speed of :meth:`iris.cube.Cube.subset` / + :meth:`iris.coords.Coord.intersect`. + (:pull:`4955`) + +#. `@stephenworsley`_ improved the speed of the :obj:`~iris.analysis.PointInCell` + regridding scheme. (:pull:`4807`) + + +🔥 Deprecations +=============== + +#. `@hsteptoe`_ and `@trexfeathers`_ (reviewer) deprecated + :func:`iris.pandas.as_series` in favour of the new + :func:`iris.pandas.as_data_frame` - see `✨ Features`_ for more details. + (:pull:`4669`) + + +🔗 Dependencies +=============== + +#. `@rcomer`_ introduced the ``dask >=2.26`` minimum pin, so that Iris can benefit + from Dask's support for `NEP13`_ and `NEP18`_. (:pull:`4905`) + +#. `@trexfeathers`_ advanced the Cartopy pin to ``>=0.21``, as Cartopy's + change to default Transverse Mercator projection affects an Iris test. + See `SciTools/cartopy@fcb784d`_ and `SciTools/cartopy@8860a81`_ for more + details. + (:pull:`4968`) + +#. `@trexfeathers`_ introduced the ``netcdf4<1.6.1`` pin to avoid a problem + with segfaults. (:pull:`4968`, :pull:`5075`, :issue:`5016`) + +#. `@trexfeathers`_ updated the Matplotlib colormap registration in + :mod:`iris.palette` in response to a deprecation warning. Using the new + Matplotlib API also means a ``matplotlib>=3.5`` pin. (:pull:`4998`) + +#. See `💣 Incompatible Changes`_ for notes about `dask/dask#9555`_. + + +📚 Documentation +================ + +#. `@ESadek-MO`_, `@TTV-Intrepid`_ and `@trexfeathers`_ added a gallery example for zonal + means plotted parallel to a cartographic plot. (:pull:`4871`) + +#. `@Esadek-MO`_ added a key-terms :ref:`glossary` page into the user guide. (:pull:`4902`) + +#. `@pp-mo`_ added a :ref:`code example ` + for converting ORCA-gridded data to an unstructured cube. (:pull:`5013`) + +#. `@Esadek-MO`_ added links to relevant Gallery examples within the User Guide + to improve understanding. (:pull:`5009`) + +#. `@trexfeathers`_ changed the warning header for the **latest** documentation + to reference Read the Docs' built-in version switcher, instead of generating + its own independent links. (:pull:`5055`) + +#. `@tkknight`_ updated the links for the Iris documentation to v2.4 and + earlier to point to the archive of zip files instead. (:pull:`5064`) + +#. `@Esadek-MO`_ began adding notes at the bottom of functions to + clarify if the function preserves laziness or not. See :issue:`3292` for + the ongoing checklist. (:pull:`5066`) + + +💼 Internal +=========== + +#. `@rcomer`_ removed the obsolete ``setUpClass`` method from Iris testing. + (:pull:`4927`) + +#. `@bjlittle`_ and `@lbdreyer`_ (reviewer) removed support for + ``python setup.py test``, which is a deprecated approach to executing + package tests, see `pypa/setuptools#1684`_. Also performed assorted + ``setup.py`` script hygiene. (:pull:`4948`, :pull:`4949`, :pull:`4950`) + +#. `@pp-mo`_ split the module :mod:`iris.fileformats.netcdf` into separate + :mod:`~iris.fileformats.netcdf.loader` and :mod:`~iris.fileformats.netcdf.saver` + submodules, just to make the code easier to handle. + +#. `@trexfeathers`_ adapted the benchmark for importing :mod:`iris.palette` to + cope with new colormap behaviour in Matplotlib `v3.6`. (:pull:`4998`) + +#. `@rcomer`_ removed a now redundant workaround for an old matplotlib bug, + highlighted by :issue:`4090`. (:pull:`4999`) + +#. `@rcomer`_ added the ``show`` option to the documentation Makefiles, as a + convenient way for contributors to view their built documentation. + (:pull:`5000`) + +.. 
comment + Whatsnew author names (@github name) in alphabetical order. Note that, + core dev names are automatically included by the common_links.inc: + +.. _@TTV-Intrepid: https://github.com/TTV-Intrepid +.. _Julian Heming: https://www.metoffice.gov.uk/research/people/julian-heming +.. _@hsteptoe: https://github.com/hsteptoe + + +.. comment + Whatsnew resources in alphabetical order: + +.. _NEP13: https://numpy.org/neps/nep-0013-ufunc-overrides.html +.. _NEP18: https://numpy.org/neps/nep-0018-array-function-protocol.html +.. _pypa/setuptools#1684: https://github.com/pypa/setuptools/issues/1684 +.. _SciTools/cartopy@fcb784d: https://github.com/SciTools/cartopy/commit/fcb784daa65d95ed9a74b02ca292801c02bc4108 +.. _SciTools/cartopy@8860a81: https://github.com/SciTools/cartopy/commit/8860a8186d4dc62478e74c83f3b2b3e8f791372e +.. _dask/dask#9555: https://github.com/dask/dask/pull/9555 diff --git a/docs/src/whatsnew/3.5.rst b/docs/src/whatsnew/3.5.rst new file mode 100644 index 0000000000..c6699ee842 --- /dev/null +++ b/docs/src/whatsnew/3.5.rst @@ -0,0 +1,214 @@ +.. include:: ../common_links.inc + +v3.5 (27 Apr 2023) +**************************************** + +This document explains the changes made to Iris for this release +(:doc:`View all changes `.) + +.. dropdown:: v3.5 Release Highlights + :color: primary + :icon: info + :animate: fade-in + :open: + + The highlights for this major/minor release of Iris include: + + * We added support for plugins. + * We allowed the usage of Iris objects as weights + for cube aggregations. + * We made Iris' use of the `netCDF4`_ library + thread-safe. + * We improved performance by changing the netCDF loader to + fetch data immediately from small netCDF + variables, instead of creating a dask array. + * We added notes within docstrings clarifying whether operations + maintain lazy data or not. + * We're so proud to fully support `@ed-hawkins`_ and `#ShowYourStripes`_ ❤️ + + And finally, get in touch with us on :issue:`GitHub` if you have + any issues or feature requests for improving Iris. Enjoy! + + +📢 Announcements +================ + +#. Congratulations to `@ESadek-MO`_ who has become a core developer for Iris! 🎉 + +#. Welcome and congratulations to `@HGWright`_, `@scottrobinson02`_ and + `@agriyakhetarpal`_ who made their first contributions to Iris! 🎉 + + +✨ Features +=========== + +#. `@bsherratt`_ added support for plugins - see the corresponding + :ref:`documentation page` for further information. + (:pull:`5144`) + +#. `@rcomer`_ enabled lazy evaluation of :obj:`~iris.analysis.RMS` calculations + with weights. (:pull:`5017`) + +#. `@schlunma`_ allowed the usage of cubes, coordinates, cell measures, or + ancillary variables as weights for cube aggregations + (:meth:`iris.cube.Cube.collapsed`, :meth:`iris.cube.Cube.aggregated_by`, and + :meth:`iris.cube.Cube.rolling_window`). This automatically adapts cube units + if necessary; see the sketch after this list. (:pull:`5084`) + +#. `@lbdreyer`_ and `@trexfeathers`_ (reviewer) added :func:`iris.plot.hist` + and :func:`iris.quickplot.hist`. (:pull:`5189`) + +#. `@tinyendian`_ edited :func:`~iris.analysis.cartography.rotate_winds` to + enable lazy computation of rotated wind vector components (:issue:`4934`, + :pull:`4972`) + +#. `@ESadek-MO`_ updated to the latest CF Standard Names Table v80 + (07 February 2023). (:pull:`5244`)
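 + +A minimal sketch of the cube-as-weights aggregation described above (``cube`` and + ``cell_areas`` are hypothetical, with ``cell_areas`` a cube of grid-cell areas on the + same horizontal grid):: + + import iris.analysis + + # Weighting by another cube adapts the units of the result automatically, + # e.g. K m2 when summing a temperature cube weighted by areas in m2. + total = cube.collapsed( + ["latitude", "longitude"], iris.analysis.SUM, weights=cell_areas + )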
 + + +🐛 Bugs Fixed +============= + +#. `@schlunma`_ fixed :meth:`iris.cube.CubeList.concatenate` so that it + preserves derived coordinates. (:issue:`2478`, :pull:`5096`) + +#. `@trexfeathers`_ and `@pp-mo`_ made Iris' use of the `netCDF4`_ library + thread-safe. (:pull:`5095`) + +#. `@ESadek-MO`_ removed the check and error raised when saving + cubes with masked :class:`iris.coords.CellMeasure`. + (:issue:`5147`, :pull:`5181`) + +#. `@scottrobinson02`_ fixed :class:`iris.util.new_axis` creating an anonymous new + dimension, when the scalar coord provided is already a dim coord. + (:issue:`4415`, :pull:`5194`) + +#. `@HGWright`_ and `@trexfeathers`_ (reviewer) changed the way + :class:`~iris.coords.CellMethod` are printed to be more CF compliant. + (:pull:`5224`) + +#. `@stephenworsley`_ fixed the way discontiguities were discovered for 2D coords. + Previously, the only bounds being compared were the bottom right bound in one + cell with the bottom left bound in the cell to its right, and the top left bound + in a cell with the bottom left bound in the cell above it. Now all bounds are + compared with all adjacent bounds from neighbouring cells. This affects + :meth:`~iris.coords.Coord.is_contiguous` and :func:`iris.util.find_discontiguities` + where additional discontiguities may be detected which previously were not. + + +💣 Incompatible Changes +======================= + +#. N/A + + +🚀 Performance Enhancements +=========================== + +#. `@pp-mo`_ changed the netCDF loader to fetch data immediately from small netCDF + variables, instead of creating a dask array: this saves both time and memory. + Note that some cubes, coordinates etc. loaded from netCDF will now have real data + where previously it was lazy. (:pull:`5229`) + + +🔥 Deprecations +=============== + +#. N/A + + +🔗 Dependencies +=============== + +#. `@trexfeathers`_ introduced the ``libnetcdf <4.9`` pin. (:pull:`5242`) + + +📚 Documentation +================ + +#. `@rcomer`_ clarified instructions for updating gallery tests. (:pull:`5100`) + +#. `@tkknight`_ unpinned ``pydata-sphinx-theme`` and set the default to use + the light version (not dark) while we make the docs dark mode friendly. + (:pull:`5129`) + +#. `@jonseddon`_ updated the citation to a more recent version of Iris. (:pull:`5116`) + +#. `@rcomer`_ linked the :obj:`~iris.analysis.PERCENTILE` aggregator from the + :obj:`~iris.analysis.MEDIAN` docstring, noting that the former handles lazy + data. (:pull:`5128`) + +#. `@trexfeathers`_ updated the WSL link to Microsoft's latest documentation, + and removed an ECMWF link in the ``v1.0`` What's New that was failing the + linkcheck CI. (:pull:`5109`) + +#. `@trexfeathers`_ added a new top-level :doc:`/community/index` section, + as a one-stop place to find out about getting involved, and how we relate + to other projects. (:pull:`5025`) + +#. The **Iris community**, with help from the **Xarray community**, produced + the :doc:`/community/iris_xarray` page, highlighting the similarities and + differences between the two packages. (:pull:`5025`) + +#. `@bjlittle`_ added a new section to the `README.md`_ to show our support + for the outstanding work of `@ed-hawkins`_ et al. for `#ShowYourStripes`_. + (:pull:`5141`) + +#. `@HGWright`_ fixed some typos from Gitwash. (:pull:`5145`) + +#. `@Esadek-MO`_ added notes to function docstrings to + clarify if the function preserves laziness or not. (:pull:`5137`) + + +💼 Internal +=========== + +#. `@bouweandela`_ and `@trexfeathers`_ (reviewer) modernized and simplified + the code of ``iris.analysis._Groupby``. (:pull:`5015`) + +#. 
`@fnattino`_ changed the order of ``ncgen`` arguments in the command to + create NetCDF files for testing (caused errors on OS X). (:pull:`5105`) + +#. `@rcomer`_ removed some old infrastructure that printed test timings. + (:pull:`5101`) + +#. `@lbdreyer`_ and `@trexfeathers`_ (reviewer) added coverage testing. This + can be enabled by using the "--coverage" flag when running the tests with + nox, i.e. ``nox --session tests -- --coverage``. (:pull:`4765`) + +#. `@lbdreyer`_ and `@trexfeathers`_ (reviewer) removed the ``--coding-tests`` + option from Iris' test runner. (:pull:`4765`) + +#. `@lbdreyer`_ removed the Iris TestRunner. Tests are now run via nox or + pytest. (:pull:`5205`) + +#. `@agriyakhetarpal`_ and `@trexfeathers`_ prevented the GitHub action for + publishing releases to PyPI from running in forks. + (:pull:`5220`, :pull:`5248`) + +#. `@trexfeathers`_ moved the benchmark runner conveniences from ``noxfile.py`` + to a dedicated ``benchmarks/bm_runner.py``. (:pull:`5215`) + +#. `@bjlittle`_, in a follow-up to :pull:`4972`, enforced the ``dask>=2022.09.0`` minimum + pin for the first use of `dask.array.ma.empty_like`_ and replaced the `@tinyendian`_ + workaround. (:pull:`5225`) + +#. `@HGWright`_, `@bjlittle`_ and `@trexfeathers`_ removed the legacy pin for + ``numpy`` array printing and replaced the test results files to match default + ``numpy`` output. (:pull:`5235`) + + +.. comment + Whatsnew author names (@github name) in alphabetical order. Note that, + core dev names are automatically included by the common_links.inc: + +.. _@fnattino: https://github.com/fnattino +.. _@ed-hawkins: https://github.com/ed-hawkins +.. _@scottrobinson02: https://github.com/scottrobinson02 +.. _@agriyakhetarpal: https://github.com/agriyakhetarpal +.. _@tinyendian: https://github.com/tinyendian + + +.. comment + Whatsnew resources in alphabetical order: + +.. _#ShowYourStripes: https://showyourstripes.info/s/globe/ +.. _README.md: https://github.com/SciTools/iris#----- +.. _dask.array.ma.empty_like: https://docs.dask.org/en/stable/generated/dask.array.ma.empty_like.html diff --git a/docs/src/whatsnew/3.6.rst b/docs/src/whatsnew/3.6.rst new file mode 100644 index 0000000000..b5a23ac401 --- /dev/null +++ b/docs/src/whatsnew/3.6.rst @@ -0,0 +1,235 @@ +.. include:: ../common_links.inc + +v3.6 (18 May 2023) +****************** + +This document explains the changes made to Iris for this release +(:doc:`View all changes `.) + + +.. dropdown:: v3.6 Release Highlights + :color: primary + :icon: info + :animate: fade-in + :open: + + We're so excited about our recent support for **delayed saving of lazy data + to netCDF** (:pull:`5191`) that we're celebrating this important step change + in behaviour with its very own dedicated release 🥳 + + By using ``iris.save(..., compute=False)`` you can now save to multiple NetCDF files + in parallel. See the new ``compute`` keyword in :func:`iris.fileformats.netcdf.save`. + This can share and reuse any common (lazy) result computations, and it makes much + better use of resources during any file-system waiting (i.e., it can use such periods + to progress the *other* saves). + + Usage example:: + + # Create output files with delayed data saving. + delayeds = [ + iris.save(cubes, filepath, compute=False) + for cubes, filepath in zip(output_cubesets, output_filepaths) + ] + # Complete saves in parallel. + dask.compute(*delayeds) + + This advance also includes **another substantial benefit**, because NetCDF saves can + now use a + `Dask.distributed scheduler `_. 
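 + + For example, a minimal sketch (the ``Client`` set-up is an assumption - point it at + your own cluster as appropriate):: + + import dask + from dask.distributed import Client + import iris + + client = Client()  # hypothetical local Distributed cluster + delayed = iris.save(cubes, "output.nc", compute=False)  # ``cubes`` as above + dask.compute(delayed)  # the save now runs on the Distributed workers + 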
+ With `Distributed `_ you can parallelise the + saves across a whole cluster, whereas previously the NetCDF saving *only* worked with + a "threaded" scheduler, limiting it to a single CPU. + + We're so super keen for the community to leverage the benefit of this new + feature within Iris that we've brought this release forward several months. + As a result, this minor release of Iris is intentionally light in content. + However, there are some other goodies available for you to enjoy, such as: + + * Performing lazy arithmetic with an Iris :class:`~iris.cube.Cube` and a + :class:`dask.array.Array`, and + * Various improvements to our documentation resulting from adoption of + `sphinx-design`_ and `sphinx-apidoc`_. + + As always, get in touch with us on :issue:`GitHub`, particularly + if you have any feedback with regards to delayed saving, or have any issues + or feature requests for improving Iris. Enjoy! + + +v3.6.1 (26 June 2023) +===================== + +.. dropdown:: v3.6.1 Patches + :color: primary + :icon: alert + :animate: fade-in + + 📢 **Announcements** + + Welcome and congratulations to `@sloosvel`_ who made their first contribution to + Iris! 🎉 + + The patches in this release of Iris include: + + ✨ **Features** + + #. `@rcomer`_ rewrote :func:`~iris.util.broadcast_to_shape` so it now handles + lazy data. This pull-request has been included to support :pull:`5341`. + (:pull:`5307`) [``pre-v3.7.0``] + + 🐛 **Bugs Fixed** + + #. `@stephenworsley`_ fixed :meth:`~iris.cube.Cube.convert_units` to allow unit + conversion of lazy data when using a `Distributed`_ scheduler. + (:issue:`5347`, :pull:`5349`) + + #. `@schlunma`_ fixed a bug in the concatenation of cubes with aux factories + which could lead to a `KeyError` due to dependencies that have not been + properly updated. + (:issue:`5339`, :pull:`5340`) + + #. `@schlunma`_ fixed a bug which realized all weights during weighted + aggregation. Now weighted aggregation is fully lazy again. + (:issue:`5338`, :pull:`5341`) + + 🚀 **Performance Enhancements** + + #. `@sloosvel`_ improved :meth:`~iris.cube.CubeList.concatenate_cube` and + :meth:`~iris.cube.CubeList.concatenate` to ensure that lazy auxiliary coordinate + points and bounds are not realized. This change now allows cubes with + high-resolution auxiliary coordinates to concatenate successfully whilst using a + minimal in-core memory footprint. + (:issue:`5115`, :pull:`5142`) + + Note that the above contribution labelled ``pre-v3.7.0`` is part of the + forthcoming Iris ``v3.7.0`` release, but needs to be included in this patch + release. + + +📢 Announcements +================ + +#. `@bjlittle`_ added the community `Contributor Covenant`_ code of conduct. + (:pull:`5291`) + + +✨ Features +=========== + +#. `@pp-mo`_ and `@lbdreyer`_ supported delayed saving of lazy data, when writing to + the netCDF file format. See :ref:`delayed netCDF saves `. + Also with significant input from `@fnattino`_. + (:pull:`5191`) + +#. `@rcomer`_ tweaked binary operations so that dask arrays may safely be passed + to arithmetic operations and :func:`~iris.util.mask_cube`. (:pull:`4929`)
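 + + A minimal sketch (``cube`` is a hypothetical 2-D cube with shape ``(3, 4)``):: + + import dask.array as da + + result = cube + da.ones((3, 4))  # evaluated lazily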
 + + +🐛 Bugs Fixed +============= + +#. `@rcomer`_ enabled automatic replacement of a Matplotlib + :class:`~matplotlib.axes.Axes` with a Cartopy + :class:`~cartopy.mpl.geoaxes.GeoAxes` when the ``Axes`` is on a + :class:`~matplotlib.figure.SubFigure`. (:issue:`5282`, :pull:`5288`) + + +💣 Incompatible Changes +======================= + +#. N/A + +🚀 Performance Enhancements +=========================== + +#. N/A + + +🔥 Deprecations +=============== + +#. N/A + + +🔗 Dependencies +=============== + +#. `@rcomer`_ and `@bjlittle`_ (reviewer) added testing support for Python + 3.11. (:pull:`5226`) + +#. `@rcomer`_ dropped support for Python 3.8, in accordance with the NEP29_ + recommendations. (:pull:`5226`) + +#. `@trexfeathers`_ introduced the ``libnetcdf !=4.9.1`` and ``numpy !=1.24.3`` + pins. (:pull:`5274`) + + +📚 Documentation +================ + +#. `@tkknight`_ migrated to `sphinx-design`_ over the legacy `sphinx-panels`_. + (:pull:`5127`) + +#. `@tkknight`_ updated the ``make`` target for ``help`` and added + ``livehtml`` to auto-generate the documentation when changes are detected + during development. (:pull:`5258`) + +#. `@tkknight`_ updated the :ref:`installing_from_source` instructions to use + ``pip``. (:pull:`5273`) + +#. `@tkknight`_ removed the legacy custom sphinx extensions that generate the + API documentation. Instead, a less complex approach via + `sphinx-apidoc`_ is now used. (:pull:`5264`) + +#. `@trexfeathers`_ re-wrote the :ref:`iris_development_releases` documentation + for clarity, and wrote a step-by-step + :doc:`/developers_guide/release_do_nothing` for the release process. + (:pull:`5134`) + +#. `@trexfeathers`_ and `@tkknight`_ added a dark-mode friendly logo. + (:pull:`5278`) + + +💼 Internal +=========== + +#. `@bjlittle`_ added the `codespell`_ `pre-commit`_ ``git-hook`` to automate + spell checking within the code-base. (:pull:`5186`) + +#. `@bjlittle`_ and `@trexfeathers`_ (reviewer) added a `check-manifest`_ + GitHub Action and `pre-commit`_ ``git-hook`` to automate verification + of assets bundled within a ``sdist`` and binary ``wheel`` of our + `scitools-iris`_ PyPI package. (:pull:`5259`) + +#. `@rcomer`_ removed a now redundant copying workaround from Resolve testing. + (:pull:`5267`) + +#. `@bjlittle`_ and `@trexfeathers`_ (reviewer) migrated ``setup.cfg`` to + ``pyproject.toml``, as motivated by `PEP-0621`_. (:pull:`5262`) + +#. `@bjlittle`_ adopted `pypa/build`_ recommended best practice to build a + binary ``wheel`` from the ``sdist``. (:pull:`5266`) + +#. `@trexfeathers`_ enabled on-demand benchmarking of Pull Requests; see + :ref:`here `. (:pull:`5286`) + + +.. comment + Whatsnew author names (@github name) in alphabetical order. Note that, + core dev names are automatically included by the common_links.inc: + +.. _@fnattino: https://github.com/fnattino +.. _@sloosvel: https://github.com/sloosvel + + +.. comment + Whatsnew resources in alphabetical order: + +.. _sphinx-panels: https://github.com/executablebooks/sphinx-panels +.. _sphinx-design: https://github.com/executablebooks/sphinx-design +.. _check-manifest: https://github.com/mgedmin/check-manifest +.. _PEP-0621: https://peps.python.org/pep-0621/ +.. _pypa/build: https://pypa-build.readthedocs.io/en/stable/ +.. _NEP29: https://numpy.org/neps/nep-0029-deprecation_policy.html +.. _Contributor Covenant: https://www.contributor-covenant.org/version/2/1/code_of_conduct/ +.. _Distributed: https://distributed.dask.org/en/stable/ diff --git a/docs/src/whatsnew/3.7.rst b/docs/src/whatsnew/3.7.rst new file mode 100644 index 0000000000..fdadb20412 --- /dev/null +++ b/docs/src/whatsnew/3.7.rst @@ -0,0 +1,174 @@ +.. include:: ../common_links.inc + +v3.7 (31 Aug 2023) +****************** + +This document explains the changes made to Iris for this release +(:doc:`View all changes `.) + + +.. 
dropdown:: v3.7 Release Highlights + :color: primary + :icon: info + :animate: fade-in + :open: + + There are no major feature highlights for this release of Iris, but it's worth + noting that, in addition to some important bug fixes in specific areas, this time + we have made a number of improvements for user-experience and usability, + notably: + + * We added :ref:`Dark mode support ` for the documentation. + + * We :ref:`added a "Dask Best Practices" guide ` + (:ref:`here `). + + * We :ref:`improved the Installation Guide `. + + * We improved the information in + :ref:`warnings from CubeList.concatenate() ` + and :ref:`documentation of Cube.convert_units() `. + + * We prevented some warnings occurring in :ref:`pp loading ` + and :ref:`contourf `. + + Please do get in touch with us on :issue:`GitHub` if you have + any issues or feature requests for improving Iris. Enjoy! + + +v3.7.1 (04 Mar 2024) +==================== + +.. dropdown:: v3.7.1 Patches + :color: primary + :icon: alert + :animate: fade-in + + The patches in this release of Iris include: + + #. `@stephenworsley`_ fixed a potential memory leak for Iris uses of + :func:`dask.array.map_blocks`; known specifically to be a problem in the + :class:`iris.analysis.AreaWeighted` regridder. (:pull:`5767`) + + +📢 Announcements +================ + +#. N/A + + +✨ Features +=========== + +#. `@rcomer`_ rewrote :func:`~iris.util.broadcast_to_shape` so it now handles + lazy data. (:pull:`5307`) + + .. _concat_warnings: + +#. `@acchamber`_ added error and warning messages about coordinate overlaps to + :func:`~iris.cube.CubeList.concatenate` to improve the concatenation process. + (:pull:`5382`) + +#. `@trexfeathers`_ included mesh location coordinates + (e.g. :attr:`~iris.experimental.ugrid.MeshXY.face_coords`) in + the data variable's ``coordinates`` attribute when saving to NetCDF. + (:issue:`5206`, :pull:`5389`) + +#. `@pp-mo`_ modified the install process to record the release version of the CF + standard-names table, when it creates the ``iris/std_names.py`` module. + The release number is also now available as + ``iris.std_names.CF_STANDARD_NAMES_TABLE_VERSION``. + (:pull:`5423`) + + +🐛 Bugs Fixed +============= + +#. `@acchamber`_ fixed a bug with :func:`~iris.util.unify_time_units` so it does not block + concatenation through different data types in rare instances. (:pull:`5372`) + +#. `@acchamber`_ removed some obsolete code that prevented extraction of time points + from cubes with bounded times. (:pull:`5175`) + + .. _cftime_warnings: + +#. `@rcomer`_ modified pp-loading to avoid a ``cftime`` warning for non-standard + calendars. (:pull:`5357`) + +#. `@rsdavies`_ modified the CF compliant standard name for m01s00i023. (:issue:`4566`) + + +💣 Incompatible Changes +======================= + +#. N/A + + +🚀 Performance Enhancements +=========================== + +#. `@rcomer`_ made :meth:`~iris.cube.Cube.aggregated_by` faster. (:pull:`4970`) + + +🔥 Deprecations +=============== + +#. N/A + + +🔗 Dependencies +=============== + +#. N/A + + +📚 Documentation +================ + +.. _docs_dark: + +#. `@tkknight`_ prepared the documentation for dark mode and enabled the option + to use it. By default the theme will be based on the user's system settings, + defaulting to ``light`` if no system setting is found. (:pull:`5299`) + + .. _dask_guide: + +#. `@HGWright`_ added a :doc:`/further_topics/dask_best_practices/index` + section into the user guide, containing advice and use cases to help users + get the best out of Dask with Iris. 
(:pull:`5190`) + + .. _convert_docs: + +#. `@acchamber`_ improved documentation for :meth:`~iris.cube.Cube.convert_units` + and :meth:`~iris.coords.Coord.convert_units` by including a link to the UDUNITS-2 + documentation which contains lists of compatible units and aliases for them. + (:pull:`5388`) + + .. _installdocs_update: + +#. `@rcomer`_ updated the :ref:`Installation Guide` to reflect + that some things are now simpler. (:pull:`5416`) + + +💼 Internal +=========== + +#. `@pp-mo`_ supported loading and saving netcdf :class:`netCDF4.Dataset` compatible + objects in place of file-paths, as hooks for a forthcoming + `"Xarray bridge" `_ facility. + (:pull:`5214`, :pull:`5212`) + + .. _contour_future: + +#. `@rcomer`_ updated :func:`~iris.plot.contourf` to avoid using functionality + that is deprecated in Matplotlib v3.8 (:pull:`5405`) + + + +.. comment + Whatsnew author names (@github name) in alphabetical order. Note that, + core dev names are automatically included by the common_links.inc: +.. _@rsdavies: https://github.com/rsdavies +.. _@acchamber: https://github.com/acchamber + + + +.. comment + Whatsnew resources in alphabetical order: diff --git a/docs/src/whatsnew/3.8.rst b/docs/src/whatsnew/3.8.rst new file mode 100644 index 0000000000..9fa87a9337 --- /dev/null +++ b/docs/src/whatsnew/3.8.rst @@ -0,0 +1,308 @@ +.. include:: ../common_links.inc + +v3.8 (29 Feb 2024) +****************** + +This document explains the changes made to Iris for this release +(:doc:`View all changes `.) + + +.. dropdown:: v3.8 Release Highlights + :color: primary + :icon: info + :animate: fade-in + :open: + + The highlights for this major/minor release of Iris include: + + * We have significantly improved :class:`~iris.analysis.AreaWeighted` + regridding performance, and added improved regridding documentation (see + :ref:`which_regridder_to_use`). + + * We have improved :class:`~iris.cube.Cube` + :attr:`~iris.cube.Cube.attributes` handling to better preserve local and + global attribute metadata. + + * We have implemented the + :data:`iris.fileformats.netcdf.loader.CHUNK_CONTROL` context manager to + offer greater control to NetCDF chunking (see :ref:`netcdf_io`). + + * We have added functionality to mask cubes using shapefiles via + :func:`iris.util.mask_cube_from_shapefile` (see + :ref:`masking-from-shapefile`). + + * We have added :attr:`~iris.coords.Coord.ignore_axis` to allow for + preventing :func:`~iris.util.guess_coord_axis` acting on desired + coordinates. + + * We have begun adding improvements to Iris' warnings, to prevent warning + duplication. + + And finally, get in touch with us on :issue:`GitHub` if you have + any issues or feature requests for improving Iris. Enjoy! + + +v3.8.1 (04 Mar 2024) +==================== + +.. dropdown:: v3.8.1 Patches + :color: primary + :icon: alert + :animate: fade-in + :open: + + The patches in this release of Iris include: + + #. `@stephenworsley`_ fixed a potential memory leak for Iris uses of + :func:`dask.array.map_blocks`; known specifically to be a problem in the + :class:`iris.analysis.AreaWeighted` regridder. (:pull:`5767`) + + +📢 Announcements +================ + +#. `@lbdreyer`_ relicensed Iris from LGPL-3 to BSD-3. (:pull:`5577`) + +#. `@HGWright`_, `@bjlittle`_ and `@trexfeathers`_ (reviewers) added a + CITATION.cff file to Iris and updated the :ref:`citation documentation ` + , to help users cite Iris in their work. (:pull:`5483`) + + +✨ Features +=========== +#. 
`@pp-mo`_, `@lbdreyer`_ and `@trexfeathers`_ improved + :class:`~iris.cube.Cube` :attr:`~iris.cube.Cube.attributes` handling to + better preserve the distinction between dataset-level and variable-level + attributes, allowing file-Cube-file round-tripping of NetCDF attributes. See + :class:`~iris.cube.CubeAttrsDict`, NetCDF + :func:`~iris.fileformats.netcdf.saver.save` and :data:`~iris.Future` for more. + (:pull:`5152`, `split attributes project`_) + +#. `@rcomer`_ rewrote :func:`~iris.util.broadcast_to_shape` so it now handles + lazy data. (:pull:`5307`) + +#. `@trexfeathers`_ and `@HGWright`_ (reviewer) sub-categorised all Iris' + :class:`UserWarning`\s for richer filtering. The full index of + sub-categories can be seen here: :mod:`iris.warnings`. (:pull:`5498`, + :pull:`5760`) + +#. `@trexfeathers`_ added the :class:`~iris.coord_systems.ObliqueMercator` + and :class:`~iris.coord_systems.RotatedMercator` coordinate systems, + complete with NetCDF loading and saving. (:pull:`5548`) + +#. `@trexfeathers`_ added the ``use_year_at_season_start`` parameter to + :func:`iris.coord_categorisation.add_season_year`. When + ``use_year_at_season_start==True``, seasons spanning the year boundary (e.g. + Winter - December to February) will be assigned to the preceding year (e.g. + the year of December) instead of the following year (the default behaviour). + (:pull:`5573`) + +#. `@HGWright`_ added :attr:`~iris.coords.Coord.ignore_axis` to allow manual + intervention preventing :func:`~iris.util.guess_coord_axis` from acting on a + coordinate. `@trexfeathers`_ documented this. (:pull:`5551`, :pull:`5744`) + +#. `@pp-mo`_, `@trexfeathers`_ and `@ESadek-MO`_ added more control over + NetCDF chunking with the use of the :data:`iris.fileformats.netcdf.loader.CHUNK_CONTROL` + context manager; see the sketch after this list. (:pull:`5588`) + +#. `@acchamber`_ and `@trexfeathers`_ (reviewer) added + :func:`iris.util.mask_cube_from_shapefile`. This builds on the original work + of `@ckmo`_, `@david-bentley`_, `@jmendesmetoffice`_, `@evyve`_ and + `@pelson`_ for the UK Met Office **ASCEND** library. See + :ref:`masking-from-shapefile` for documentation. (:pull:`5470`) + +#. `@trexfeathers`_ updated to the latest CF Standard Names Table v84 + (19 January 2024). (:pull:`5761`)
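 + +A minimal sketch of the chunk control described above (the variable name, file name + and chunk size are hypothetical):: + + import iris + from iris.fileformats.netcdf.loader import CHUNK_CONTROL + + # Request one timestep per chunk for this variable when loading. + with CHUNK_CONTROL.set("air_temperature", time=1): + cube = iris.load_cube("data.nc", "air_temperature")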
 + + +🐛 Bugs Fixed +============= + +#. `@scottrobinson02`_ fixed the output units when dividing a coordinate by a + cube. (:issue:`5305`, :pull:`5331`) + +#. `@ESadek-MO`_ has updated :mod:`iris.tests.graphics.idiff` to stop duplicated file names + preventing acceptance. (:issue:`5098`, :pull:`5482`) + +#. `@acchamber`_ and `@rcomer`_ modified 2D plots so that time axes and their + ticks have more sensible default labels. (:issue:`5426`, :pull:`5561`) + +#. `@rcomer`_ and `@trexfeathers`_ (reviewer) added handling for realization + coordinates when saving pp files. (:issue:`4747`, :pull:`5568`) + +#. `@ESadek-MO`_ has updated + :mod:`iris.fileformats._nc_load_rules.helpers` to lessen warning duplication. + (:issue:`5536`, :pull:`5685`) + +#. `@bjlittle`_ fixed coordinate construction in the NetCDF loading pipeline to + ensure that bounds have the same units as the associated points. + (:issue:`1801`, :pull:`5746`) + + +💣 Incompatible Changes +======================= + +#. `@bouweandela`_ and `@trexfeathers`_ (reviewer) updated :class:`~iris.cube.Cube` + comparison so equality is now possible between cubes with data containing a + :obj:`numpy.nan`. e.g. ``Cube([np.nan, 1.0]) == Cube([np.nan, 1.0])`` will now + evaluate to :obj:`True`, while previously this would have been :obj:`False`. (:pull:`5713`) + + +🚀 Performance Enhancements +=========================== + +#. `@stephenworsley`_ improved the speed of :class:`~iris.analysis.AreaWeighted` + regridding. (:pull:`5543`) + +#. `@bouweandela`_ made :func:`iris.util.array_equal` faster when comparing + lazy data from file. This will also speed up coordinate comparison. + (:pull:`5610`) + +#. `@bouweandela`_ changed :func:`iris.coords.Coord.cell` so it does not realize + all coordinate data and only loads a single cell instead. (:pull:`5693`) + +#. `@rcomer`_ and `@trexfeathers`_ (reviewer) modified + :func:`~iris.analysis.stats.pearsonr` so it preserves lazy data in all cases + and also runs a little faster. (:pull:`5638`) + +#. `@bouweandela`_ made comparing coordinates and arrays to themselves faster. (:pull:`5691`) + +#. `@bouweandela`_ and `@trexfeathers`_ (reviewer) made comparing cubes to + themselves faster. (:pull:`5713`) + + +🔥 Deprecations +=============== + +#. N/A + + +🔗 Dependencies +=============== + +#. `@bjlittle`_ enforced the minimum pin of ``numpy>1.21`` in accordance with the `NEP29 Drop Schedule`_. + (:pull:`5525`) + +#. `@bjlittle`_ enforced the minimum pin of ``numpy>1.22`` in accordance with the `NEP29 Drop Schedule`_. + (:pull:`5668`) + +#. `@bjlittle`_ updated ``ubuntu`` and ``mambaforge`` to the latest versions for ``readthedocs``. + (:pull:`5702`) + + +📚 Documentation +================ + +#. `@trexfeathers`_ documented the intended use of warnings filtering with + Iris. See :ref:`filtering-warnings`. (:pull:`5509`) + +#. `@rcomer`_ updated the + :ref:`sphx_glr_generated_gallery_meteorology_plot_COP_maps.py` to show how + a colourbar may steal space from multiple axes. (:pull:`5537`) + +#. `@tkknight`_ improved the top navigation bar alignment and the number of + links shown. Also improved how the warning banner is implemented. + (:pull:`5505` and :pull:`5508`) + +#. `@tkknight`_ removed broken git links. (:pull:`5569`) + +#. `@ESadek-MO`_ added a phrasebook for synonymous terms used in similar + packages. (:pull:`5564`) + +#. `@ESadek-MO`_ and `@trexfeathers`_ created a technical paper for NetCDF + saving and loading, :ref:`netcdf_io`, with a section on chunking, and placeholders + for further topics. (:pull:`5588`) + +#. `@bouweandela`_ updated all hyperlinks to https. (:pull:`5621`) + +#. `@ESadek-MO`_ created an index page for :ref:`further_topics_index`, and + relocated all 'Technical Papers' into + :ref:`further_topics_index`. (:pull:`5602`) + +#. `@trexfeathers`_ made drop-down icons visible to show which pages link to + 'sub-pages'. (:pull:`5684`) + +#. `@trexfeathers`_ improved the documentation of acceptable + :class:`~iris.cube.Cube` standard names in + :func:`iris.analysis.calculus.curl`. (:pull:`5680`) + +#. `@tkknight`_ added ruff documentation in the :ref:`developer_testing_ci` of the + :ref:`developers_guide`. (:pull:`5701`) + +#. `@tkknight`_ configured the API documentation to show 2 levels + for the ToC (Table of Contents) for each page. (:pull:`5714`) + + +💼 Internal +=========== + +#. `@trexfeathers`_ and `@ESadek-MO`_ (reviewer) performed a suite of fixes and + improvements for benchmarking, primarily to get + :ref:`on demand pull request benchmarking ` + working properly. (Main pull request: :pull:`5437`, more detail: + :pull:`5430`, :pull:`5431`, :pull:`5432`, :pull:`5434`, :pull:`5436`) + +#. 
`@trexfeathers`_ set a number of memory benchmarks to be on-demand, as they + were vulnerable to false positives in CI runs. (:pull:`5481`) + +#. `@acchamber`_ and `@ESadek-MO`_ resolved several deprecations to reduce the + number of warnings raised during tests. + (:pull:`5493`, :pull:`5511`) + +#. `@trexfeathers`_ replaced all uses of the ``logging.WARNING`` level, in + favour of using Python warnings, following team agreement. (:pull:`5488`) + +#. `@trexfeathers`_ adapted benchmarking to work with ASV ``>=v0.6`` by no + longer using the ``--strict`` argument. (:pull:`5496`) + +#. `@fazledyn-or`_ replaced ``NotImplementedError`` with ``NotImplemented`` as + a proper method call. (:pull:`5544`) + +#. `@bjlittle`_ corrected various comment spelling mistakes detected by + `codespell`_. (:pull:`5546`) + +#. `@rcomer`_ reduced the size of the conda environment used for testing. + (:pull:`5606`) + +#. `@trexfeathers`_ and `@pp-mo`_ improved how the conda-forge feedstock + release candidate branch is managed, via + :doc:`../developers_guide/release_do_nothing`. + (:pull:`5515`) + +#. `@bjlittle`_ adopted and configured the `ruff`_ linter. (:pull:`5623`) + +#. `@bjlittle`_ configured ``line-length = 88`` for `black`_, `isort`_ + and `ruff`_. (:pull:`5632`) + +#. `@bjlittle`_ replaced `isort`_ with `ruff`_. (:pull:`5633`) + +#. `@bjlittle`_ replaced `black`_ with `ruff`_. (:pull:`5634`) + +#. `@tkknight`_ and `@bjlittle`_ (reviewer) updated the codebase to be compliant with + almost all of the rules for `ruff pydocstyle`_. + (https://github.com/SciTools/iris/issues/5625#issuecomment-1859159734) + +#. `@tkknight`_ and `@bjlittle`_ (reviewer) updated the codebase to ensure docstrings + that are not covered by the ruff checks are consistent with numpydoc style. + (:issue:`4721`) + +.. comment + Whatsnew author names (@github name) in alphabetical order. Note that, + core dev names are automatically included by the common_links.inc: + +.. _@scottrobinson02: https://github.com/scottrobinson02 +.. _@acchamber: https://github.com/acchamber +.. _@fazledyn-or: https://github.com/fazledyn-or +.. _@ckmo: https://github.com/ckmo +.. _@david-bentley: https://github.com/david-bentley +.. _@jmendesmetoffice: https://github.com/jmendesmetoffice +.. _@evyve: https://github.com/evyve + + +.. comment + Whatsnew resources in alphabetical order: + +.. _NEP29 Drop Schedule: https://numpy.org/neps/nep-0029-deprecation_policy.html#drop-schedule +.. _codespell: https://github.com/codespell-project/codespell +.. _split attributes project: https://github.com/orgs/SciTools/projects/5?pane=info +.. _ruff pydocstyle: https://docs.astral.sh/ruff/rules/#pydocstyle-d \ No newline at end of file diff --git a/docs/src/whatsnew/3.9.rst b/docs/src/whatsnew/3.9.rst new file mode 100644 index 0000000000..3af0e894b5 --- /dev/null +++ b/docs/src/whatsnew/3.9.rst @@ -0,0 +1,130 @@ +.. include:: ../common_links.inc + +v3.9 (22 Apr 2024) +****************** + +This document explains the changes made to Iris for this release +(:doc:`View all changes `.) + + +.. dropdown:: v3.9 Release Highlights + :color: primary + :icon: info + :animate: fade-in + :open: + + This is a small release to make two important changes available as soon as + possible: + + * The :mod:`iris.experimental.geovista` module. + * Removal of fill value collision warnings in NetCDF saving, which + significantly improves Iris' performance when parallel processing. + + See below for more detail on these changes. 
+ + And finally, get in touch with us on :issue:`GitHub` if you have + any issues or feature requests for improving Iris. Enjoy! + + +📢 Announcements +================ + +#. ⏱️ Performance benchmarking has shown that loading + :term:`Fields File (FF) Format` with a large number of fields via + :func:`iris.fileformats.um.structured_um_loading` has become ~30% slower + since `Dask version 2024.2.1`_. + + +✨ Features +=========== + +#. `@HGWright`_ and `@trexfeathers`_ added the + :mod:`iris.experimental.geovista` module, providing conveniences for using + :ref:`ugrid geovista` with Iris. To see some of this in action, check out + :ref:`ugrid operations`. Note that GeoVista is an **optional** dependency + so you will need to explicitly install it into your environment. + (:pull:`5740`) + + +🐛 Bugs Fixed +============= + +#. `@pp-mo`_ prevented the CHUNK_CONTROL feature from hitting an error when loading + from a NetCDF v3 file. (:pull:`5897`) + + +💣 Incompatible Changes +======================= + +#. Warnings are no longer produced for fill value 'collisions' in NetCDF + saving. :ref:`Read more `. (:pull:`5833`) + + +🚀 Performance Enhancements +=========================== + +#. `@bouweandela`_ made :func:`iris.util.rolling_window` work with lazy arrays. + (:pull:`5775`) + +#. `@stephenworsley`_ fixed a potential memory leak for Iris uses of + :func:`dask.array.map_blocks`; known specifically to be a problem in the + :class:`iris.analysis.AreaWeighted` regridder. (:pull:`5767`) + +#. `@fnattino`_ and `@pp-mo`_ prevented cube printout from showing the values of lazy + scalar coordinates, since this can involve a lengthy computation that must be + re-computed each time. (:pull:`5896`) + + +🔥 Deprecations +=============== + +#. N/A + + +🔗 Dependencies +=============== + +#. `@bjlittle`_ dropped support for ``py39`` and adopted support for ``py312`` as per + the `NEP-29`_ schedule. (:pull:`5894`) + + +📚 Documentation +================ + +#. N/A + + +💼 Internal +=========== + +#. `@trexfeathers`_ setup automatic benchmarking on pull requests that modify + files likely to affect performance or performance testing. Such pull + requests are also labelled using the `Pull Request Labeler Github action`_ + to increase visibility. (:pull:`5763`, :pull:`5776`) + +#. `@tkknight`_ updated codebase to comply with a new enforced rule `NPY002`_ for + `ruff`_. (:pull:`5786`) + +#. `@tkknight`_ enabled `numpydoc validation`_ via the pre-commit hook. The docstrings + have been updated to comply and some rules have been ignored for now. + (:pull:`5762`) + +#. `@jfrost-mo`_ enabled colour output for pytest on GitHub Actions. (:pull:`5895`) + + +.. comment + Whatsnew author names (@github name) in alphabetical order. Note that, + core dev names are automatically included by the common_links.inc: + +.. _@jfrost-mo: https://github.com/jfrost-mo +.. _@fnattino: https://github.com/fnattino + + +.. comment + Whatsnew resources in alphabetical order: + +.. _Pull Request Labeler GitHub action: https://github.com/actions/labeler +.. _NPY002: https://docs.astral.sh/ruff/rules/numpy-legacy-random/ +.. _numpydoc validation: https://numpydoc.readthedocs.io/en/latest/validation.html# +.. _Dask version 2024.2.1: https://docs.dask.org/en/stable/changelog.html#v2024-2-1 +.. 
_NEP-29: https://numpy.org/neps/nep-0029-deprecation_policy.html#drop-schedule diff --git a/docs/iris/src/whatsnew/images/notebook_repr.png b/docs/src/whatsnew/images/notebook_repr.png similarity index 100% rename from docs/iris/src/whatsnew/images/notebook_repr.png rename to docs/src/whatsnew/images/notebook_repr.png diff --git a/docs/iris/src/whatsnew/images/transverse_merc.png b/docs/src/whatsnew/images/transverse_merc.png similarity index 100% rename from docs/iris/src/whatsnew/images/transverse_merc.png rename to docs/src/whatsnew/images/transverse_merc.png diff --git a/docs/src/whatsnew/index.rst b/docs/src/whatsnew/index.rst new file mode 100644 index 0000000000..74cb0cd43d --- /dev/null +++ b/docs/src/whatsnew/index.rst @@ -0,0 +1,45 @@ + +.. include:: ../common_links.inc + +.. _iris_whatsnew: + +What's New in Iris +------------------ + +.. include:: latest.rst + +.. toctree:: + :maxdepth: 1 + :hidden: + + latest.rst + 3.10.rst + 3.9.rst + 3.8.rst + 3.7.rst + 3.6.rst + 3.5.rst + 3.4.rst + 3.3.rst + 3.2.rst + 3.1.rst + 3.0.rst + 2.4.rst + 2.3.rst + 2.2.rst + 2.1.rst + 2.0.rst + 1.13.rst + 1.12.rst + 1.11.rst + 1.10.rst + 1.9.rst + 1.8.rst + 1.7.rst + 1.6.rst + 1.5.rst + 1.4.rst + 1.3.rst + 1.2.rst + 1.1.rst + 1.0.rst diff --git a/docs/src/whatsnew/latest.rst b/docs/src/whatsnew/latest.rst new file mode 100644 index 0000000000..58e3f5f956 --- /dev/null +++ b/docs/src/whatsnew/latest.rst @@ -0,0 +1,124 @@ +.. include:: ../common_links.inc + +|iris_version| |build_date| [unreleased] +**************************************** + +This document explains the changes made to Iris for this release +(:doc:`View all changes `.) + + +.. dropdown:: |iris_version| Release Highlights + :color: primary + :icon: info + :animate: fade-in + :open: + + The highlights for this major/minor release of Iris include: + + * N/A + + And finally, get in touch with us on :issue:`GitHub` if you have + any issues or feature requests for improving Iris. Enjoy! + + +📢 Announcements +================ + +#. N/A + + +✨ Features +=========== + +#. `@jrackham-mo`_ added :meth:`~iris.io.format_picker.FormatAgent.copy` and + equality methods to :class:`iris.io.format_picker.FormatAgent`, as requested + in :issue:`6108`, actioned in :pull:`6119`. + +#. `@ukmo-ccbunney`_ added a ``colorbar`` keyword to allow optional creation of + the colorbar in the following quickplot methods: + + * :meth:`iris.quickplot.contourf` + + * :meth:`iris.quickplot.pcolor` + + * :meth:`iris.quickplot.pcolormesh` + + Requested in :issue:`5970`, actioned in :pull:`6169`. + + +🐛 Bugs Fixed +============= + +#. `@rcomer`_ enabled partial collapse of multi-dimensional string coordinates, + fixing :issue:`3653`. (:pull:`5955`) + +#. `@ukmo-ccbunney`_ improved error handling for a malformed `cell_method` + attribute. Also made cell_method string parsing more lenient w.r.t. + whitespace. (:pull:`6083`) + + +💣 Incompatible Changes +======================= + +#. N/A + + +🚀 Performance Enhancements +=========================== + +#. `@bouweandela`_ made the time coordinate categorisation functions in + :mod:`~iris.coord_categorisation` faster. Anyone using + :func:`~iris.coord_categorisation.add_categorised_coord` + with cftime :class:`~cftime.datetime` objects can benefit from the same + improvement by adding a type hint to their category function. (:pull:`5999`)
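 + + A minimal sketch (``cube`` is a hypothetical cube with a ``time`` coordinate; we + assume the hint belongs on the category function's point argument):: + + import cftime + import iris.coord_categorisation + + # The ``cftime.datetime`` type hint enables the faster code path. + def get_year(coord, point: cftime.datetime) -> int: + return point.year + + iris.coord_categorisation.add_categorised_coord(cube, "year", "time", get_year) + +#. 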
`@bouweandela`_ made :meth:`iris.cube.CubeList.concatenate` faster if more + than two cubes are concatenated with equality checks on the values of + auxiliary coordinates, derived coordinates, cell measures, or ancillary + variables enabled. + In some cases, this may lead to higher memory use. This can be remedied by + reducing the number of Dask workers. + In rare cases, the new implementation could potentially be slower. This + may happen when there are very many or large auxiliary coordinates, derived + coordinates, cell measures, or ancillary variables to be checked that span + the concatenation axis. This issue can be avoided by disabling the + problematic check. (:pull:`5926`) + +🔥 Deprecations +=============== + +#. N/A + + +🔗 Dependencies +=============== + +#. N/A + + +📚 Documentation +================ + +#. `@bouweandela`_ added type hints for :class:`~iris.cube.Cube`. (:pull:`6037`) + +💼 Internal +=========== + +#. `@trexfeathers`_ improved the new ``tracemalloc`` benchmarking (introduced + in Iris v3.10.0, :pull:`5948`) to use the same statistical repeat strategy + as timing benchmarks. (:pull:`5981`) + +#. `@trexfeathers`_ adapted Iris to work with Cartopy v0.24. (:pull:`6171`, + :pull:`6172`) + + +.. comment + Whatsnew author names (@github name) in alphabetical order. Note that, + core dev names are automatically included by the common_links.inc: + +.. _@jrackham-mo: https://github.com/jrackham-mo + + +.. comment + Whatsnew resources in alphabetical order: + +.. _cartopy#2390: https://github.com/SciTools/cartopy/issues/2390 diff --git a/docs/src/whatsnew/latest.rst.template b/docs/src/whatsnew/latest.rst.template new file mode 100644 index 0000000000..fedddec5c0 --- /dev/null +++ b/docs/src/whatsnew/latest.rst.template @@ -0,0 +1,107 @@ +.. include:: ../common_links.inc + +|iris_version| |build_date| [unreleased] +**************************************** + +This document explains the changes made to Iris for this release +(:doc:`View all changes `.) + + +.. dropdown:: |iris_version| Release Highlights + :color: primary + :icon: info + :animate: fade-in + :open: + + The highlights for this major/minor release of Iris include: + + * N/A + + And finally, get in touch with us on :issue:`GitHub` if you have + any issues or feature requests for improving Iris. Enjoy! + + +NOTE: section BELOW is a template for bugfix patches +==================================================== + (Please remove this section when creating an initial 'latest.rst') + +|iris_version| |build_date| +=========================== + +.. dropdown:: |iris_version| Patches + :color: primary + :icon: alert + :animate: fade-in + + The patches in this release of Iris include: + + #. N/A + +NOTE: section ABOVE is a template for bugfix patches +==================================================== + (Please remove this section when creating an initial 'latest.rst') + + +📢 Announcements +================ + +#. N/A + + +✨ Features +=========== + +#. N/A + + +🐛 Bugs Fixed +============= + +#. N/A + + +💣 Incompatible Changes +======================= + +#. N/A + + +🚀 Performance Enhancements +=========================== + +#. N/A + + +🔥 Deprecations +=============== + +#. N/A + + +🔗 Dependencies +=============== + +#. N/A + + +📚 Documentation +================ + +#. N/A + + +💼 Internal +=========== + +#. N/A + + +.. comment + Whatsnew author names (@github name) in alphabetical order. Note that, + core dev names are automatically included by the common_links.inc: + + + + +.. 
comment + Whatsnew resources in alphabetical order: \ No newline at end of file diff --git a/docs/src/why_iris.rst b/docs/src/why_iris.rst new file mode 100644 index 0000000000..6c9b5fb7fb --- /dev/null +++ b/docs/src/why_iris.rst @@ -0,0 +1,43 @@ +.. _why_iris: + +Why Iris +======== + +**A powerful, format-agnostic, community-driven Python package for analysing +and visualising Earth science data.** + +Iris implements a data model based on the `CF conventions `_ +giving you a powerful, format-agnostic interface for working with your data. +It excels when working with multi-dimensional Earth Science data, where tabular +representations become unwieldy and inefficient. + +`CF Standard names `_, +`units `_, and coordinate metadata +are built into Iris, giving you a rich and expressive interface for maintaining +an accurate representation of your data. Its treatment of data and +associated metadata as first-class objects includes: + +.. rst-class:: squarelist + +* visualisation interface based on `matplotlib `_ and + `cartopy `_, +* unit conversion, +* subsetting and extraction, +* merge and concatenate, +* aggregations and reductions (including min, max, mean and weighted averages), +* interpolation and regridding (including nearest-neighbor, linear and + area-weighted), and +* operator overloads (``+``, ``-``, ``*``, ``/``, etc.). + +A number of file formats are recognised by Iris, including CF-compliant NetCDF, +GRIB, and PP, and it has a plugin architecture to allow other formats to be +added seamlessly. + +Building upon `NumPy `_ and +`dask `_, Iris scales from efficient +single-machine workflows right through to multi-core clusters and HPC. +Interoperability with packages from the wider scientific Python ecosystem comes +from Iris' use of standard NumPy/dask arrays as its underlying data storage. + +Iris is part of SciTools, for more information see https://scitools.org.uk/. +For **Iris 2.4** and earlier documentation please see :ref:`iris_support`. \ No newline at end of file diff --git a/etc/cf-standard-name-table.xml b/etc/cf-standard-name-table.xml index 0c4f59b888..c5405e2dca 100644 --- a/etc/cf-standard-name-table.xml +++ b/etc/cf-standard-name-table.xml @@ -1,10 +1,18 @@ - - 59 - 2018-09-25T14:23:22Z + + 85 + CF-StandardNameTable-85 + 2024-05-21T15:55:10Z + 2024-05-21T15:55:10Z Centre for Environmental Data Analysis support@ceda.ac.uk - + + + 1 + + + Acoustic area backscattering strength is 10 times the log10 of the ratio of the area backscattering coefficient to the reference value, 1 (m2 m-2). Area backscattering coefficient is the integral of the volume backscattering coefficient over a defined distance. Volume backscattering coefficient is the linear form of acoustic_volume_backscattering_strength_in_sea_water. For further details see MacLennan et. al (2002) doi:10.1006/jmsc.2001.1158. + s @@ -13,6 +21,20 @@ The quantity with standard name acoustic_signal_roundtrip_travel_time_in_sea_water is the time taken for an acoustic signal to propagate from the emitting instrument to a reflecting surface and back again to the instrument. In the case of an instrument based on the sea floor and measuring the roundtrip time to the sea surface, the data are commonly used as a measure of ocean heat content. + + 1 + + + Target strength is 10 times the log10 of the ratio of backscattering cross-section to the reference value, 1 m2. Backscattering cross-section is a parameter computed from the intensity of the backscattered sound wave relative to the intensity of the incident sound wave. 
For further details see MacLennan et. al (2002) doi:10.1006/jmsc.2001.1158. + + + + 1 + + + Acoustic volume backscattering strength is 10 times the log10 of the ratio of the volume backscattering coefficient to the reference value, 1 m-1. Volume backscattering coefficient is the integral of the backscattering cross-section divided by the volume sampled. Backscattering cross-section is a parameter computed from the intensity of the backscattered sound wave relative to the intensity of the incident sound wave. The parameter is computed to provide a measurement that is proportional to biomass density per unit volume in the field of fisheries acoustics. For further details see MacLennan et. al (2002) doi:10.1006/jmsc.2001.1158. + + m @@ -27,6 +49,13 @@ The "aerodynamic_resistance" is the resistance to mixing through the boundary layer toward the surface by means of the dominant process, turbulent transport. Reference: Wesely, M. L., 1989, doi:10.1016/0004-6981(89)90153-4. + + 1 + + + A variable with the standard_name of aerosol_type_in_atmosphere_layer_in_air contains either strings which indicate the type of the aerosol determined following a certain aerosol typing schema, or flags which can be translated to strings using flag_values and flag_meanings attributes. "Layer" means any layer with upper and lower boundaries that have constant values in some vertical coordinate. There must be a vertical coordinate variable indicating the extent of the layer(s). + + year @@ -45,7 +74,14 @@ day - "Age of surface snow" means the length of time elapsed since the snow accumulated on the earth's surface. The surface called "surface" means the lower boundary of the atmosphere. + "Age of surface snow" means the length of time elapsed since the snow accumulated on the earth's surface. Surface snow refers to the snow on the solid ground or on surface ice cover, but excludes, for example, falling snowflakes and snow on plants. + + + + 1 + + + This flag is an algorithmic combination of the results of all relevant quality tests run for the related ancillary parent data variable. The linkage between the data variable and this variable is achieved using the ancillary_variables attribute. The aggregate quality flag provides a summary of all quality tests performed on the data variable (both automated and manual) whether present in the dataset as independent ancillary variables to the parent data variable or not. @@ -55,11 +91,25 @@ + + K + + + The "equivalent potential temperature" is a thermodynamic quantity, with its natural logarithm proportional to the entropy of moist air, that is conserved in a reversible moist adiabatic process. Reference: AMS Glossary http://glossary.ametsoc.org/wiki/Equivalent_potential_temperature. It is the temperature of a parcel of air if all the moisture contained in it were first condensed, releasing latent heat, before moving the parcel dry adiabatically to a standard pressure, typically representative of mean sea level pressure. To specify the standard pressure to which the quantity applies, provide a scalar coordinate variable with standard name reference_pressure. 
It is strongly recommended that a variable with this standard name should have a units_metadata attribute, with one of the values "on-scale" or "difference", whichever is appropriate for the data, because it is essential to know whether the temperature is on-scale (meaning relative to the origin of the scale indicated by the units) or refers to temperature differences (implying that the origin of the temperature scale is irrelevant), in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). + + + + K + + + The equivalent temperature is the temperature that an air parcel would have if all water vapor were condensed at constant pressure and the enthalpy released from the vapor used to heat the air. Reference: AMS Glossary http://glossary.ametsoc.org/wiki/Equivalent_temperature. It is the isobaric equivalent temperature and not the adiabatic equivalent temperature, also known as pseudoequivalent temperature, which has the standard name air_pseudo_equivalent_temperature. It is strongly recommended that a variable with this standard name should have a units_metadata attribute, with one of the values "on-scale" or "difference", whichever is appropriate for the data, because it is essential to know whether the temperature is on-scale (meaning relative to the origin of the scale indicated by the units) or refers to temperature differences (implying that the origin of the temperature scale is irrelevant), in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). + + + K 13 theta - Potential temperature is the temperature a parcel of air or sea water would have if moved adiabatically to sea level pressure. + Air potential temperature is the temperature a parcel of air would have if moved dry adiabatically to a standard pressure, typically representative of mean sea level pressure. To specify the standard pressure to which the quantity applies, provide a scalar coordinate variable with standard name reference_pressure. It is strongly recommended that a variable with this standard name should have a units_metadata attribute, with one of the values "on-scale" or "difference", whichever is appropriate for the data, because it is essential to know whether the temperature is on-scale (meaning relative to the origin of the scale indicated by the units) or refers to temperature differences (implying that the origin of the temperature scale is irrelevant), in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). + @@ -125,46 +175,60 @@ "Top of atmosphere model" means the upper boundary of the top layer of an atmosphere model. Air pressure is the force per unit area which would be exerted when the moving gas molecules of which the air is composed strike a theoretical surface of any orientation. + + K + 14 + + The pseudoequivalent potential temperature is the temperature a parcel of air would have if it is expanded by a pseudoadiabatic (irreversible moist-adiabatic) process to zero pressure and afterwards compressed by a dry-adiabatic process to a standard pressure, typically representative of mean sea level pressure. Reference: AMS Glossary http://glossary.ametsoc.org/wiki/Pseudoequivalent_potential_temperature. A pseudoadiabatic process means that the liquid water that condenses is assumed to be removed as soon as it is formed. Reference: AMS Glossary http://glossary.ametsoc.org/wiki/Pseudoadiabatic_process. 
To specify the standard pressure to which the quantity applies, provide a scalar coordinate variable with the standard name reference_pressure. It is strongly recommended that a variable with this standard name should have a units_metadata attribute, with one of the values "on-scale" or "difference", whichever is appropriate for the data, because it is essential to know whether the temperature is on-scale (meaning relative to the origin of the scale indicated by the units) or refers to temperature differences (implying that the origin of the temperature scale is irrelevant), in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). + + + + K + + + The pseudoequivalent temperature is also known as the adiabatic equivalent temperature. It is the temperature that an air parcel would have after undergoing the following process: dry-adiabatic expansion until saturated; pseudoadiabatic expansion until all moisture is precipitated out; dry-adiabatic compression to the initial pressure. Reference: AMS Glossary http://glossary.ametsoc.org/wiki/Equivalent_temperature. This quantity is distinct from the isobaric equivalent temperature, also known as equivalent temperature, which has the standard name air_equivalent_temperature. It is strongly recommended that a variable with this standard name should have a units_metadata attribute, with one of the values "on-scale" or "difference", whichever is appropriate for the data, because it is essential to know whether the temperature is on-scale (meaning relative to the origin of the scale indicated by the units) or refers to temperature differences (implying that the origin of the temperature scale is irrelevant), in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). + + K 11 E130 ta - Air temperature is the bulk temperature of the air, not the surface (skin) temperature. + Air temperature is the bulk temperature of the air, not the surface (skin) temperature. It is strongly recommended that a variable with this standard name should have a units_metadata attribute, with one of the values "on-scale" or "difference", whichever is appropriate for the data, because it is essential to know whether the temperature is on-scale (meaning relative to the origin of the scale indicated by the units) or refers to temperature differences (implying that the origin of the temperature scale is irrelevant), in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). K 25 - "anomaly" means difference from climatology. Air temperature is the bulk temperature of the air, not the surface (skin) temperature. + The term "anomaly" means difference from climatology. Air temperature is the bulk temperature of the air, not the surface (skin) temperature. It is strongly recommended that a variable with this standard name should have the attribute units_metadata="temperature: difference", meaning that it refers to temperature differences and implying that the origin of the temperature scale is irrelevant, because it is essential to know whether a temperature is on-scale or a difference in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). K - cloud_top refers to the top of the highest cloud. Air temperature is the bulk temperature of the air, not the surface (skin) temperature. 
+ cloud_top refers to the top of the highest cloud. Air temperature is the bulk temperature of the air, not the surface (skin) temperature. It is strongly recommended that a variable with this standard name should have a units_metadata attribute, with one of the values "on-scale" or "difference", whichever is appropriate for the data, because it is essential to know whether the temperature is on-scale (meaning relative to the origin of the scale indicated by the units) or refers to temperature differences (implying that the origin of the temperature scale is irrelevant), in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). K - The "effective cloud top defined by infrared radiation" is (approximately) the geometric height above the surface that is one optical depth at infrared wavelengths (in the region of 11 micrometers) below the cloud top that would be detected by visible and lidar techniques. Reference: Minnis, P. et al. 2011 CERES Edition-2 Cloud Property Retrievals Using TRMM VIRS and Terra and Aqua MODIS Data&#x2014;Part I: Algorithms IEEE Transactions on Geoscience and Remote Sensing, 49(11), 4374-4400. doi: http://dx.doi.org/10.1109/TGRS.2011.2144601. + The "effective cloud top defined by infrared radiation" is (approximately) the geometric height above the surface that is one optical depth at infrared wavelengths (in the region of 11 micrometers) below the cloud top that would be detected by visible and lidar techniques. Reference: Minnis, P. et al. 2011 CERES Edition-2 Cloud Property Retrievals Using TRMM VIRS and Terra and Aqua MODIS Data&#x2014;Part I: Algorithms IEEE Transactions on Geoscience and Remote Sensing, 49(11), 4374-4400. doi: http://dx.doi.org/10.1109/TGRS.2011.2144601. It is strongly recommended that a variable with this standard name should have a units_metadata attribute, with one of the values "on-scale" or "difference", whichever is appropriate for the data, because it is essential to know whether the temperature is on-scale (meaning relative to the origin of the scale indicated by the units) or refers to temperature differences (implying that the origin of the temperature scale is irrelevant), in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). K m-1 19 - Air temperature is the bulk temperature of the air, not the surface (skin) temperature. A lapse rate is the negative derivative of a quantity with respect to increasing height above the surface, or the (positive) derivative with respect to increasing depth. + Air temperature is the bulk temperature of the air, not the surface (skin) temperature. A lapse rate is the negative derivative of a quantity with respect to increasing height above the surface, or the (positive) derivative with respect to increasing depth. It is strongly recommended that a variable with this standard name should have the attribute units_metadata="temperature: difference", meaning that it refers to temperature differences and implying that the origin of the temperature scale is irrelevant, because it is essential to know whether a temperature is on-scale or a difference in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). K - Air temperature is the bulk temperature of the air, not the surface (skin) temperature. Air temperature excess and deficit are calculated relative to the air temperature threshold. 
+ Air temperature is the bulk temperature of the air, not the surface (skin) temperature. Air temperature excess and deficit are calculated relative to the air temperature threshold. It is strongly recommended that a variable with this standard name should have the attribute units_metadata="temperature: on-scale", meaning that the temperature is relative to the origin of the scale indicated by the units, because it is essential to know whether a temperature is on-scale or a difference in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). @@ -202,25 +266,32 @@ Altitude is the (geometric) height above the geoid, which is the reference geopotential surface. The geoid is similar to mean sea level. - + m - Altitude is the (geometric) height above the geoid, which is the reference geopotential surface. The geoid is similar to mean sea level. "Top of atmosphere model" means the upper boundary of the top layer of an atmosphere model. + The altitude at top of atmosphere boundary layer is the elevation above sea level of the top of the (atmosphere) planetary boundary layer. The phrase "defined_by" provides the information of the tracer used for identifying the atmospheric boundary layer top. "Ambient_aerosol" means that the aerosol is measured or modelled at the ambient state of pressure, temperature and relative humidity that exists in its immediate environment. "Ambient aerosol particles" are aerosol particles that have taken up ambient water through hygroscopic growth. The extent of hygroscopic growth depends on the relative humidity and the composition of the particles. "By ranging instrument" means that the backscattering is obtained through ranging techniques like lidar and radar. - + m - Altitude is the (geometric) height above the geoid, which is the reference geopotential surface. The geoid is similar to mean sea level. + The altitude at top of atmosphere mixed layer is the elevation above sea level of the top of the (atmosphere) mixed layer or convective boundary layer. The phrase "defined_by" provides the information of the tracer used for identifying the atmospheric boundary layer top. "Ambient_aerosol" means that the aerosol is measured or modelled at the ambient state of pressure, temperature and relative humidity that exists in its immediate environment. "Ambient aerosol particles" are aerosol particles that have taken up ambient water through hygroscopic growth. The extent of hygroscopic growth depends on the relative humidity and the composition of the particles. "By ranging instrument" means that the volume backscattering coefficient is obtained through ranging techniques like lidar and radar. - + m - "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. "Ambient_aerosol" means that the aerosol is measured or modelled at the ambient state of pressure, temperature and relative humidity that exists in its immediate environment. "Ambient aerosol particles" are aerosol particles that have taken up ambient water through hygroscopic growth. The extent of hygroscopic growth depends on the relative humidity and the composition of the particles. To specify the relative humidity and temperature at which the quantity described by the standard name applies, provide scalar coordinate variables with standard names of "relative_humidity" and "air_temperature". 
+ Altitude is the (geometric) height above the geoid, which is the reference geopotential surface. The geoid is similar to mean sea level. "Top of atmosphere model" means the upper boundary of the top layer of an atmosphere model. + + + + m + + + Altitude is the (geometric) height above the geoid, which is the reference geopotential surface. The geoid is similar to mean sea level. @@ -262,7 +333,7 @@ degree - Standard names for "platform" describe the motion and orientation of the vehicle from which observations are made. Platforms include, but are not limited to, satellites, aeroplanes, ships, instruments and buoys. An angle of rotation is reckoned positive in the anticlockwise direction. The "angle_of_rotation_from_solar_azimuth_to_platform_azimuth" is the angle of rotation between the solar azimuth angle and the platform azimuth angle. Solar azimuth angle is the horizontal angle between the line of sight from the observation point to the sun and a reference direction at the observation point, which is often due north. The angle is measured clockwise, starting from the reference direction. Platform azimuth angle is the horizontal angle between the line of sight from the observation point to the platform and a reference direction at the observation point, which is often due north. The angle is measured clockwise, starting from the reference direction. + An angle of rotation is reckoned positive in the anticlockwise direction. The "angle_of_rotation_from_solar_azimuth_to_platform_azimuth" is the angle of rotation between the solar azimuth angle and the platform azimuth angle. Solar azimuth angle is the horizontal angle between the line of sight from the observation point to the sun and a reference direction at the observation point, which is often due north. The angle is measured clockwise, starting from the reference direction. Platform azimuth angle is the horizontal angle between the line of sight from the observation point to the platform and a reference direction at the observation point, which is often due north. The angle is measured clockwise, starting from the reference direction. A "platform" is a structure or vehicle that serves as a base for mounting sensors. Platforms include, but are not limited to, satellites, aeroplanes, ships, buoys, instruments, ground stations, and masts. @@ -272,6 +343,20 @@ The "Angstrom exponent" appears in the formula relating aerosol optical thickness to the wavelength of incident radiation: T(lambda) = T(lambda0) * [lambda/lambda0] ** (-1 * alpha) where alpha is the Angstrom exponent, lambda is the wavelength of incident radiation, lambda0 is a reference wavelength, T(lambda) and T(lambda0) are the values of aerosol optical thickness at wavelengths lambda and lambda0, respectively. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. "Ambient_aerosol" means that the aerosol is measured or modelled at the ambient state of pressure, temperature and relative humidity that exists in its immediate environment. "Ambient aerosol particles" are aerosol particles that have taken up ambient water through hygroscopic growth. The extent of hygroscopic growth depends on the relative humidity and the composition of the particles. To specify the relative humidity and temperature at which the quantity described by the standard name applies, provide scalar coordinate variables with standard names of "relative_humidity" and "air_temperature". 
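As a worked example (not part of the diff) of the Angstrom exponent relation quoted in the entry above, T(lambda) = T(lambda0) * [lambda/lambda0] ** (-1 * alpha) can be inverted for alpha given aerosol optical thickness measured at two wavelengths; the numbers here are illustrative only::

    import math

    # Illustrative aerosol optical thickness at two wavelengths (nm).
    tau, tau0 = 0.25, 0.10
    lam, lam0 = 440.0, 870.0

    # Rearranging T(lambda) = T(lambda0) * (lambda / lambda0) ** (-alpha):
    alpha = -math.log(tau / tau0) / math.log(lam / lam0)
    print(round(alpha, 2))  # 1.34 for these illustrative values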
+ + 1 + + + The Angstrom exponent of volume backwards scattering is the Angstrom exponent related only to the aerosol backwards scattering component. It is alpha in the following equation relating volume backwards scattering (back) at the wavelength lambda to volume backwards scattering at a different wavelength lambda0: back(lambda) = back(lambda0) * [lambda/lambda0] ** (-1 * alpha). "Ambient_aerosol" means that the aerosol is measured or modelled at the ambient state of pressure, temperature and relative humidity that exists in its immediate environment. "Ambient aerosol particles" are aerosol particles that have taken up ambient water through hygroscopic growth. The extent of hygroscopic growth depends on the relative humidity and the composition of the particles. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. + + + + K + + + Air temperature is the bulk temperature of the air, not the surface (skin) temperature. The quantity with standard name apparent_air_temperature is the perceived air temperature derived from either a combination of temperature and wind (which has standard name wind_chill_of_air_temperature) or temperature and humidity (which has standard name heat_index_of_air_temperature) for the hour indicated by the time coordinate variable. When the air temperature falls to 283.15 K or below, wind chill is used for the apparent_air_temperature. When the air temperature rises above 299.817 K, the heat index is used for apparent_air_temperature. For temperatures above 283.15 K and below 299.817 K, the apparent_air_temperature is the ambient air temperature (which has standard name air_temperature). References: https://digital.weather.gov/staticpages/definitions.php; WMO codes registry entry http://codes.wmo.int/grib2/codeflag/4.2/_0-0-21. It is strongly recommended that a variable with this standard name should have a units_metadata attribute, with one of the values "on-scale" or "difference", whichever is appropriate for the data, because it is essential to know whether the temperature is on-scale (meaning relative to the origin of the scale indicated by the units) or refers to temperature differences (implying that the origin of the temperature scale is irrelevant), in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). + + mol kg-1 + + + @@ -283,42 +368,42 @@ 1 + - "Area fraction" means the fraction of horizontal area. To specify which area is quantified by a variable of "area_fraction", provide a coordinate variable or scalar coordinate variable of "area_type". Alternatively, if one is defined, use a more specific standard name of "X_area_fraction" for the fraction of horizontal area occupied by X. + "Area fraction" is the fraction of a grid cell's horizontal area that has some characteristic of interest. It is evaluated as the area of interest divided by the grid cell area. It may be expressed as a fraction, a percentage, or any other dimensionless representation of a fraction. To specify which area is quantified by a variable with standard name area_fraction, provide a coordinate variable or scalar coordinate variable with standard name area_type. Alternatively, if one is defined, use a more specific standard name of X_area_fraction for the fraction of horizontal area occupied by X. 
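The apparent_air_temperature entry above amounts to a piecewise selection rule between wind chill, heat index, and the ambient temperature. A minimal sketch (not part of the diff), assuming wind-chill and heat-index values computed elsewhere under the standard names the entry cites::

    def apparent_air_temperature(t_air, wind_chill, heat_index):
        # All values in kelvin; thresholds as given in the entry above.
        if t_air <= 283.15:  # cold regime: wind chill applies
            return wind_chill
        if t_air > 299.817:  # hot regime: heat index applies
            return heat_index
        return t_air  # otherwise: the ambient air temperature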
1 psbg - The surface called "surface" means the lower boundary of the atmosphere. The fraction of horizontal area where the surface specified by the axes other than horizontal axes, for instance an isobaric surface, is below the (ground or sea) surface. + The quantity with standard name area_fraction_below_surface is the fraction of horizontal area where a given isobaric surface is below the (ground or sea) surface. "Area fraction" is the fraction of a grid cell's horizontal area that has some characteristic of interest. It is evaluated as the area of interest divided by the grid cell area. It may be expressed as a fraction, a percentage, or any other dimensionless representation of a fraction. The surface called "surface" means the lower boundary of the atmosphere. 1 - "X_area_fraction" means the fraction of horizontal area occupied by X. "X_area" means the horizontal area occupied by X within the grid cell. A coordinate variable of solar_zenith_angle indicating the day extent should be specified. + "Area fraction" is the fraction of a grid cell's horizontal area that has some characteristic of interest. It is evaluated as the area of interest divided by the grid cell area. It may be expressed as a fraction, a percentage, or any other dimensionless representation of a fraction. A coordinate variable of solar_zenith_angle indicating the day extent should be specified. Solar zenith angle is the angle between the line of sight to the sun and the local vertical. 1 - "X_area_fraction" means the fraction of horizontal area occupied by X. "X_area" means the horizontal area occupied by X within the grid cell. A coordinate variable of solar_zenith_angle indicating the night extent should be specified. + "Area fraction" is the fraction of a grid cell's horizontal area that has some characteristic of interest. It is evaluated as the area of interest divided by the grid cell area. It may be expressed as a fraction, a percentage, or any other dimensionless representation of a fraction. A coordinate variable of solar_zenith_angle indicating the night extent should be specified. Solar zenith angle is the angle between the line of sight to the sun and the local vertical. 1 - "X_area_fraction" means the fraction of horizontal area occupied by X. "X_area" means the horizontal area occupied by X within the grid cell. A coordinate variable of solar_zenith_angle indicating the twilight extent should be specified. + "Area fraction" is the fraction of a grid cell's horizontal area that has some characteristic of interest. It is evaluated as the area of interest divided by the grid cell area. It may be expressed as a fraction, a percentage, or any other dimensionless representation of a fraction. A coordinate variable of solar_zenith_angle indicating the twilight extent should be specified. Solar zenith angle is the angle between the line of sight to the sun and the local vertical. - A variable with the standard name of area_type contains strings which indicate the nature of the surface e.g. land, sea, sea_ice. These strings are standardised. Values must be taken from the area_type table. + A variable with the standard_name of area_type contains either strings which indicate the nature of the surface e.g. land, sea, sea_ice, or flags which can be translated to strings using flag_values and flag_meanings attributes. These strings are standardised. Values must be taken from the area_type table. 
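To make the day/night/twilight area-fraction entries above concrete, a small sketch (not part of the diff) evaluating the daylit fraction of a field of solar zenith angles; the 90-degree day/night split is an illustrative convention, not taken from the table::

    import numpy as np

    # Hypothetical solar zenith angles (degrees) across a grid.
    sza = np.array([[35.0, 80.0], [95.0, 120.0]])

    # Area fraction: area of interest divided by the grid area.
    day_fraction = (sza < 90.0).mean()
    print(day_fraction)  # 0.5 for this illustrative field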
@@ -328,13 +413,6 @@ The asymmetry factor is the angular integral of the aerosol scattering phase function weighted by the cosine of the angle with the incident radiation flux. The asymmetry coefficient is assumed to be an integral over all wavelengths, unless a coordinate of radiation_wavelength is included to specify the wavelength. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. "Ambient_aerosol" means that the aerosol is measured or modelled at the ambient state of pressure, temperature and relative humidity that exists in its immediate environment. "Ambient aerosol particles" are aerosol particles that have taken up ambient water through hygroscopic growth. The extent of hygroscopic growth depends on the relative humidity and the composition of the particles. To specify the relative humidity and temperature at which the quantity described by the standard name applies, provide scalar coordinate variables with standard names of "relative_humidity" and "air_temperature". - - s-1 - 41 - - Absolute vorticity is the sum of relative vorticity and the upward component of vorticity due to the Earth's rotation. - - 1 @@ -392,21 +470,21 @@ - J kg -1 + J kg-1 Convective(ly) available potential energy (often abbreviated CAPE) is a stability measure calculated by integrating the positive temperature difference between the surrounding atmosphere and a parcel of air lifted adiabatically from the surface to its equilibrium level. CAPE exists under conditions of potential instability, and measures the potential energy per unit mass that would be released by the unstable parcel if it were able to convect upwards to equilibrium. - J kg -1 + J kg-1 Convective inhibition is the amount of energy per unit mass required to overcome the negatively buoyant energy exerted by the environment on a parcel of air. Convective inhibition is often abbreviated as "CIN" or "CINH". It is calculated by integrating the negative temperature difference between the surrounding atmosphere and a parcel of air lifted adiabatically from a given starting height to its equilibrium level. A coordinate variable of original_air_pressure_of_lifted_parcel should be specified to indicate the starting height of the lifted parcel. - J kg -1 + J kg-1 Convective inhibition is the amount of energy per unit mass required to overcome the negatively buoyant energy exerted by the environment on a parcel of air. Convective inhibition is often abbreviated as "CIN" or "CINH". It is calculated by integrating the negative temperature difference between the surrounding atmosphere and a parcel of air lifted adiabatically from the surface to its equilibrium level. @@ -461,6 +539,13 @@ + + m2 s-2 + + + One-half the scalar product of the air velocity and vorticity vectors, where vorticity refers to the standard name atmosphere_upward_absolute_vorticity. Helicity is proportional to the strength of the flow, the amount of vertical wind shear, and the amount of turning in the flow. + + m2 s-1 35 @@ -479,14 +564,14 @@ m - See Appendix D of the CF convention for information about dimensionless vertical coordinates. + See Appendix D of the CF convention for information about parametric vertical coordinates. 1 - See Appendix D of the CF convention for information about dimensionless vertical coordinates. + See Appendix D of the CF convention for information about parametric vertical coordinates. @@ -496,6 +581,13 @@ "Content" indicates a quantity per unit area. 
The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. + + m + + + The quantity with standard name atmosphere_layer_thickness_expressed_as_geopotential_height_difference is the difference of geopotential height between two atmospheric levels. "Layer" means any layer with upper and lower boundaries that have constant values in some vertical coordinate. There must be a vertical coordinate variable indicating the extent of the layer(s). If the layers are model layers, the vertical coordinate can be "model_level_number", but it is recommended to specify a physical coordinate (in a scalar or auxiliary coordinate variable) as well. "Thickness" means the vertical extent of a layer. Geopotential is the sum of the specific gravitational potential energy relative to the geoid and the specific centripetal potential energy. Geopotential height is the geopotential divided by the standard acceleration due to gravity. It is numerically similar to the altitude (or geometric height) and not to the quantity with standard name "height", which is relative to the surface. + + m @@ -528,7 +620,7 @@ 1 - "ln_X" means natural logarithm of X. X must be dimensionless. See Appendix D of the CF convention for information about dimensionless vertical coordinates. + "ln_X" means natural logarithm of X. X must be dimensionless. See Appendix D of the CF convention for information about parametric vertical coordinates. @@ -668,7 +760,7 @@ kg m-2 - "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. "Brox" describes a family of chemical species consisting of inorganic bromine compounds with the exception of hydrogen bromide (HBr) and bromine nitrate (BrONO2). "Brox" is the term used in standard names for all species belonging to the family that are represented within a given model. The list of individual species that are included in a quantity with a group chemical standard name can vary between models. Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute. "Inorganic bromine", sometimes referred to as Bry, describes a family of chemical species which result from the degradation of source gases containing bromine (halons, methyl bromide, VSLS) and natural inorganic bromine sources such as volcanoes, seasalt and other aerosols. Standard names that use the term "inorganic_bromine" are used for quantities that contain all inorganic bromine species including HCl and ClONO2. The phrase 'expressed_as' is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. + "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer are used". 
"Brox" describes a family of chemical species consisting of inorganic bromine compounds with the exception of hydrogen bromide (HBr) and bromine nitrate (BrONO2). The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. "Brox" is the term used in standard names for all species belonging to the family that are represented within a given model. The list of individual species that are included in a quantity with a group chemical standard name can vary between models. Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute. "Inorganic bromine", sometimes referred to as Bry, describes a family of chemical species which result from the degradation of source gases containing bromine (halons, methyl bromide, VSLS) and natural inorganic bromine sources such as volcanoes, sea salt and other aerosols. Standard names that use the term "inorganic_bromine" are used for quantities that contain all inorganic bromine species including HCl and ClONO2. @@ -696,49 +788,49 @@ kg m-2 - "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. The chemical formula of carbon tetrachloride is CCl4. + "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. The chemical formula of carbon tetrachloride is CCl4. The IUPAC name for carbon tetrachloride is tetrachloromethane. kg m-2 - "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. The chemical formula of CFC11 is CFCl3. The IUPAC name fof CFC11 is trichloro-fluoro-methane. + "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. The mass is the total mass of the molecules. The chemical formula of CFC11 is CFCl3. The IUPAC name for CFC11 is trichloro(fluoro)methane. kg m-2 - "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. The chemical formula of CFC113 is CCl2FCClF2. The IUPAC name for CFC113 is 1,1,2-trichloro-1,2,2-trifluoro-ethane. + "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. 
For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. The mass is the total mass of the molecules. The chemical formula of CFC113 is CCl2FCClF2. The IUPAC name for CFC113 is 1,1,2-trichloro-1,2,2-trifluoroethane. kg m-2 - "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. The chemical formula of CFC113a CCl3CF3. The IUPAC name for CFC113a is 1,1,1-trichloro-2,2,2-trifluoro-ethane. + "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. The mass is the total mass of the molecules. The chemical formula of CFC113a is CCl3CF3. The IUPAC name for CFC113a is 1,1,1-trichloro-2,2,2-trifluoroethane. kg m-2 - "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. The chemical formula of CFC114 is CClF2CClF2. The IUPAC name for CFC114 is 1,2-dichloro-1,1,2,2-tetrafluoro-ethane. + "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. The mass is the total mass of the molecules. The chemical formula of CFC114 is CClF2CClF2. The IUPAC name for CFC114 is 1,2-dichloro-1,1,2,2-tetrafluoroethane. kg m-2 - "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. The chemical formula of CFC115 is CClF2CF3. The IUPAC name for CFC115 is 1-chloro-1,1,2,2,2-pentafluoro-ethane. + "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. The chemical formula of CFC115 is CClF2CF3. The IUPAC name for CFC115 is 1-chloro-1,1,2,2,2-pentafluoroethane. kg m-2 - "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. The chemical formula for CFC12 is CF2Cl2. The IUPAC name for CFC12 is dichloro-difluoro-methane. + "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. The chemical formula for CFC12 is CF2Cl2. 
The IUPAC name for CFC12 is dichloro(difluoro)methane. @@ -780,14 +872,14 @@ kg m-2 - "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. + "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. "Cloud liquid water" refers to the liquid phase of cloud water. A diameter of 0.2 mm has been suggested as an upper limit to the size of drops that shall be regarded as cloud drops; larger drops fall rapidly enough so that only very strong updrafts can sustain them. Any such division is somewhat arbitrary, and active cumulus clouds sometimes contain cloud drops much larger than this. Reference: AMS Glossary http://glossary.ametsoc.org/wiki/Cloud_drop. kg m-2 - "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. "Clox" describes a family of chemical species consisting of inorganic chlorine compounds with the exception of hydrogen chloride (HCl) and chlorine nitrate (ClONO2). "Clox" is the term used in standard names for all species belonging to the family that are represented within a given model. The list of individual species that are included in a quantity with a group chemical standard name can vary between models. Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute. "Inorganic chlorine", sometimes referred to as Cly, describes a family of chemical species which result from the degradation of source gases containing chlorine (CFCs, HCFCs, VSLS) and natural inorganic chlorine sources such as seasalt and other aerosols. Standard names that use the term "inorganic_chlorine" are used for quantities that contain all inorganic chlorine species including HCl and ClONO2. The phrase 'expressed_as' is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. + "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. "Clox" describes a family of chemical species consisting of inorganic chlorine compounds with the exception of hydrogen chloride (HCl) and chlorine nitrate (ClONO2). The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. "Clox" is the term used in standard names for all species belonging to the family that are represented within a given model. 
The list of individual species that are included in a quantity with a group chemical standard name can vary between models. Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute. "Inorganic chlorine", sometimes referred to as Cly, describes a family of chemical species which result from the degradation of source gases containing chlorine (CFCs, HCFCs, VSLS) and natural inorganic chlorine sources such as sea salt and other aerosols. Standard names that use the term "inorganic_chlorine" are used for quantities that contain all inorganic chlorine species including HCl and ClONO2. @@ -808,7 +900,7 @@ kg m-2 - Convective cloud is that produced by the convection schemes in an atmosphere model. "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. + "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. Convective cloud is that produced by the convection schemes in an atmosphere model. "Cloud liquid water" refers to the liquid phase of cloud water. A diameter of 0.2 mm has been suggested as an upper limit to the size of drops that shall be regarded as cloud drops; larger drops fall rapidly enough so that only very strong updrafts can sustain them. Any such division is somewhat arbitrary, and active cumulus clouds sometimes contain cloud drops much larger than this. Reference: AMS Glossary http://glossary.ametsoc.org/wiki/Cloud_drop. @@ -927,154 +1019,161 @@ kg m-2 - "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. The chemical formula for halon1202 is CBr2F2. The IUPAC name for halon1202 is dibromo-difluoro-methane. + "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. The mass is the total mass of the molecules. The chemical formula for Halon1202 is CBr2F2. The IUPAC name for Halon1202 is dibromo(difluoro)methane. kg m-2 - "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. The chemical formula for halon1211 is CBrClF2. The IUPAC name for halon1211 is bromo-chloro-difluoro-methane. + "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. The mass is the total mass of the molecules. 
The chemical formula for Halon1211 is CBrClF2. The IUPAC name for Halon1211 is bromo-chloro-difluoromethane. kg m-2 - "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. The chemical formula for halon1301 is CBrF3. The IUPAC name for halon1301 is bromo-trifluoro-methane. + "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer are used". The mass is the total mass of the molecules. The chemical formula for Halon1301 is CBrF3. The IUPAC name for Halon1301 is bromo(trifluoro)methane. kg m-2 - "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. The chemical formula for halo2402 is C2Br2F4. The IUPAC name for halon2402 is 1,2-dibromo-1,1,2,2-tetrafluoro-ethane. + "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. The mass is the total mass of the molecules. The chemical formula for Halon2402 is C2Br2F4. The IUPAC name for Halon2402 is 1,2-dibromo-1,1,2,2-tetrafluoroethane. kg m-2 - "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. The chemical formula for hcc140a is CH3CCl3. The IUPAC name for hcc140a is 1,1,1-trichloro-ethane. + "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. The chemical formula for HCC140a, also called methyl chloroform, is CH3CCl3. The IUPAC name for HCC140a is 1,1,1-trichloroethane. kg m-2 - "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. The chemical formula for HCFC141b is CH3CCl2F. The IUPAC name for HCFC141b is 1,1-dichloro-1-fluoroethane. + "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. The chemical formula for HCFC141b is CH3CCl2F. The IUPAC name for HCFC141b is 1,1-dichloro-1-fluoroethane. kg m-2 - "Content" indicates a quantity per unit area. 
The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. The chemical formula for HCFC142b is CH3CClF2. The IUPAC name for HCFC142b is 1-chloro-1,1-difluoroethane. + "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. The chemical formula for HCFC142b is CH3CClF2. The IUPAC name for HCFC142b is 1-chloro-1,1-difluoroethane. kg m-2 - "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. The chemical formula for HCFC22 is CHClF2. The IUPAC name for HCFC22 is chloro-difluoro-methane. + "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer are used". The mass is the total mass of the molecules. The chemical formula for HCFC22 is CHClF2. The IUPAC name for HCFC22 is chloro(difluoro)methane. kg m-2 - "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. The chemical formula for hexachlorobiphenyl is C12H4Cl6. This structure of this species consists of two linked benzene rings, each of which is additionally bonded to three chlorine atoms. + "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. The mass is the total mass of the molecules. The chemical formula for hexachlorobiphenyl is C12H4Cl6. The structure of this species consists of two linked benzene rings, each of which is additionally bonded to three chlorine atoms. kg m-2 - "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. "HOx" means a combination of two radical species containing hydrogen and oxygen: OH and HO2. The phrase 'expressed_as' is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. + "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. 
"HOx" means a combination of two radical species containing hydrogen and oxygen: OH and HO2. The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. kg m-2 - "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. The chemical formula for hydrogen bromide is HBr. + "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. The mass is the total mass of the molecules. The chemical formula for hydrogen bromide is HBr. kg m-2 - "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. The chemical formula for hydrogen chloride is HCl. + "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. The chemical formula for hydrogen chloride is HCl. kg m-2 - "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. The chemical formula for hydrogen cyanide is HCN. + "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. The mass is the total mass of the molecules. The chemical formula for hydrogen cyanide is HCN. kg m-2 - "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. The chemical formula for hydrogen peroxide is H2O2. + "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. The mass is the total mass of the molecules. The chemical formula for hydrogen peroxide is H2O2. kg m-2 - "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. 
The chemical formula for the hydroperoxyl radical is HO2. In chemistry, a 'radical' is a highly reactive, and therefore shortlived, species. + "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. The chemical formula for the hydroperoxyl radical is HO2. In chemistry, a "radical" is a highly reactive, and therefore short lived, species. kg m-2 - "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. The chemical formula for the hydroxyl radical is OH. In chemistry, a 'radical' is a highly reactive, and therefore shortlived, species. + "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. The chemical formula for the hydroxyl radical is OH. In chemistry, a "radical" is a highly reactive, and therefore short lived, species. kg m-2 - "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. The chemical formula for hypobromous acid is HOBr. + "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. The chemical formula for hypobromous acid is HOBr. kg m-2 - "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. The chemical formula for hypochlorous acid is HOCl. + "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. The chemical formula for hypochlorous acid is HOCl. kg m-2 - "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. "Inorganic bromine", sometimes referred to as Bry, describes a family of chemical species which result from the degradation of source gases containing bromine (halons, methyl bromide, VSLS) and natural inorganic bromine sources such as volcanoes, seasalt and other aerosols. "Inorganic bromine" is the term used in standard names for all species belonging to the family that are represented within a given model.
The list of individual species that are included in a quantity having a group chemical standard name can vary between models. Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute. Standard names that use the term "brox" are used for quantities that contain all inorganic bromine species except HBr and BrONO2. + "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. "Inorganic bromine", sometimes referred to as Bry, describes a family of chemical species which result from the degradation of source gases containing bromine (halons, methyl bromide, VSLS) and natural inorganic bromine sources such as volcanoes, sea salt and other aerosols. "Inorganic bromine" is the term used in standard names for all species belonging to the family that are represented within a given model. The list of individual species that are included in a quantity having a group chemical standard name can vary between models. Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute. Standard names that use the term "brox" are used for quantities that contain all inorganic bromine species except HBr and BrONO2. kg m-2 - "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. "Inorganic chlorine", sometimes referred to as Cly, describes a family of chemical species which result from the degradation of source gases containing chlorine (CFCs, HCFCs, VSLS) and natural inorganic chlorine sources such as seasalt and other aerosols. "Inorganic chlorine" is the term used in standard names for all species belonging to the family that are represented within a given model. The list of individual species that are included in a quantity having a group chemical standard name can vary between models. Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute. Standard names that use the term "clox" are used for quantities that contain all inorganic chlorine species except HCl and ClONO2. + "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. "Inorganic chlorine", sometimes referred to as Cly, describes a family of chemical species which result from the degradation of source gases containing chlorine (CFCs, HCFCs, VSLS) and natural inorganic chlorine sources such as sea salt and other aerosols. "Inorganic chlorine" is the term used in standard names for all species belonging to the family that are represented within a given model. The list of individual species that are included in a quantity having a group chemical standard name can vary between models. 
Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute. Standard names that use the term "clox" are used for quantities that contain all inorganic chlorine species except HCl and ClONO2. kg m-2 - "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. The chemical formula for isoprene is CH2=C(CH3)CH=CH2. The IUPAC name for isoprene is 2-methyl-buta-1,3-diene. Isoprene is a member of the group of hydrocarbons known as terpenes. There are standard names for the terpene group as well as for some of the individual species. + "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. The chemical formula for isoprene is CH2=C(CH3)CH=CH2. The IUPAC name for isoprene is 2-methylbuta-1,3-diene. Isoprene is a member of the group of hydrocarbons known as terpenes. There are standard names for the terpene group as well as for some of the individual species. kg m-2 - "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. The chemical formula for limonene is C10H16. The IUPAC name for limonene is 1-methyl-4-prop-1-en-2-yl-cyclohexene. Limonene is a member of the group of hydrocarbons known as terpenes. There are standard names for the terpene group as well as for some of the individual species. + "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. The chemical formula for limonene is C10H16. The IUPAC name for limonene is 1-methyl-4-prop-1-en-2-ylcyclohexene. Limonene is a member of the group of hydrocarbons known as terpenes. There are standard names for the terpene group as well as for some of the individual species. + + + + kg m-2 + + + "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. "Liquid_precipitation" includes both "rain" and "drizzle". "Rain" means drops of water falling through the atmosphere that have a diameter greater than 0.5 mm. "Drizzle" means drops of water falling through the atmosphere that have a diameter typically in the range 0.2-0.5 mm. @@ -1123,7 +1222,7 @@ kg m-2 - "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. The chemical formula for methyl_peroxy_radical is CH3O2. 
In chemistry, a 'radical' is a highly reactive, and therefore shortlived, species. + "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. The chemical formula for methyl_peroxy_radical is CH3O2. In chemistry, a "radical" is a highly reactive, and therefore short lived, species. @@ -1144,7 +1243,7 @@ kg m-2 - "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. The chemical formula for nitrate is NO3. In chemistry, a 'radical' is a highly reactive, and therefore shortlived, species. + "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. The chemical formula for nitrate is NO3. In chemistry, a "radical" is a highly reactive, and therefore short lived, species. @@ -1242,7 +1341,7 @@ kg m-2 - "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. The term "peroxy_radicals" means all organic and inorganic peroxy radicals. This includes HO2 and all organic peroxy radicals, sometimes referred to as RO2. In chemistry, a 'radical' is a highly reactive, and therefore shortlived, species. + "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. The term "peroxy_radicals" means all organic and inorganic peroxy radicals. This includes HO2 and all organic peroxy radicals, sometimes referred to as RO2. In chemistry, a "radical" is a highly reactive, and therefore short lived, species. @@ -1294,6 +1393,13 @@ "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. The mass is the total mass of the particles. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. Aerosol particles take up ambient water (a process known as hygroscopic growth) depending on the relative humidity and the composition of the particles. "Dry aerosol particles" means aerosol particles without any water uptake. "Secondary particulate organic matter " means particulate organic matter formed within the atmosphere from gaseous precursors. The sum of primary_particulate_organic_matter_dry_aerosol and secondary_particulate_organic_matter_dry_aerosol is particulate_organic_matter_dry_aerosol. + + kg m-2 + + + "Content" indicates a quantity per unit area.
The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. "Snow" refers to the precipitating part of snow in the atmosphere – the cloud snow content is excluded. + + kg m-2 @@ -1340,15 +1446,14 @@ kg m-2 - "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. The chemical formula for toluene is C6H5CH3. Toluene has the same structure as benzene, except that one of the hydrogen atoms is replaced by a methyl group. The systematic name for toluene is methylbenzene. + "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. The chemical formula for toluene is C6H5CH3. Toluene has the same structure as benzene, except that one of the hydrogen atoms is replaced by a methyl group. The IUPAC name for toluene is methylbenzene. kg m-2 - "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. "Volcanic_ash" means the fine-grained products of explosive volcanic eruptions, such as minerals or crystals, older fragmented rock (e.g. andesite), and glass. Particles within a volcanic ash cloud have diameters less than 2 mm. "Volcanic_ash" does not include non-volcanic dust. - + "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. "Volcanic_ash" means the fine-grained products of explosive volcanic eruptions, such as minerals or crystals, older fragmented rock (e.g. andesite), and glass. Particles within a volcanic ash cloud have diameters less than 2 mm. "Volcanic_ash" does not include non-volcanic dust. @@ -1544,7 +1649,7 @@ mol - The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere, i.e. summed over the atmospheric column and over the entire globe. "Brox" describes a family of chemical species consisting of inorganic bromine compounds with the exception of hydrogen bromide (HBr) and bromine nitrate (BrONO2). "Brox" is the term used in standard names for all species belonging to the family that are represented within a given model. The list of individual species that are included in a quantity with a group chemical standard name can vary between models. Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute. 
"Inorganic bromine", sometimes referred to as Bry, describes a family of chemical species which result from the degradation of source gases containing bromine (halons, methyl bromide, VSLS) and natural inorganic bromine sources such as volcanoes, seasalt and other aerosols. Standard names that use the term "inorganic_bromine" are used for quantities that contain all inorganic bromine species including HCl and ClONO2. The phrase 'expressed_as' is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. + The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere, i.e. summed over the atmospheric column and over the entire globe. "Brox" describes a family of chemical species consisting of inorganic bromine compounds with the exception of hydrogen bromide (HBr) and bromine nitrate (BrONO2). The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. "Brox" is the term used in standard names for all species belonging to the family that are represented within a given model. The list of individual species that are included in a quantity with a group chemical standard name can vary between models. Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute. "Inorganic bromine", sometimes referred to as Bry, describes a family of chemical species which result from the degradation of source gases containing bromine (halons, methyl bromide, VSLS) and natural inorganic bromine sources such as volcanoes, sea salt and other aerosols. Standard names that use the term "inorganic_bromine" are used for quantities that contain all inorganic bromine species including HCl and ClONO2. @@ -1572,49 +1677,49 @@ mol - The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere, i.e. summed over the atmospheric column and over the entire globe. The chemical formula for carbon tetrachloride is CCl4. + The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere, i.e. summed over the atmospheric column and over the entire globe. The chemical formula for carbon tetrachloride is CCl4. The IUPAC name for carbon tetrachloride is tetrachloromethane. mol - The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere, i.e. summed over the atmospheric column and over the entire globe. The chemical formula of CFC11 is CFCl3. The IUPAC name fof CFC11 is trichloro-fluoro-methane. + The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere, i.e. summed over the atmospheric column and over the entire globe. The chemical formula of CFC11 is CFCl3. The IUPAC name for CFC11 is trichloro(fluoro)methane. mol - The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere, i.e. summed over the atmospheric column and over the entire globe. The chemical formula of CFC113 is CCl2FCClF2. The IUPAC name for CFC113 is 1,1,2-trichloro-1,2,2-trifluoro-ethane. 
+ The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere, i.e. summed over the atmospheric column and over the entire globe. The chemical formula of CFC113 is CCl2FCClF2. The IUPAC name for CFC113 is 1,1,2-trichloro-1,2,2-trifluoroethane. mol - The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere, i.e. summed over the atmospheric column and over the entire globe. The chemical formula of CFC113a CCl3CF3. The IUPAC name for CFC113a is 1,1,1-trichloro-2,2,2-trifluoro-ethane. + The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere, i.e. summed over the atmospheric column and over the entire globe. The chemical formula of CFC113a is CCl3CF3. The IUPAC name for CFC113a is 1,1,1-trichloro-2,2,2-trifluoroethane. mol - The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere, i.e. summed over the atmospheric column and over the entire globe. The chemical formula of CFC114 is CClF2CClF2. The IUPAC name for CFC114 is 1,2-dichloro-1,1,2,2-tetrafluoro-ethane. + The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere, i.e. summed over the atmospheric column and over the entire globe. The chemical formula of CFC114 is CClF2CClF2. The IUPAC name for CFC114 is 1,2-dichloro-1,1,2,2-tetrafluoroethane. mol - The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere, i.e. summed over the atmospheric column and over the entire globe. The chemical formula of CFC115 is CClF2CF3. The IUPAC name for CFC115 is 1-chloro-1,1,2,2,2-pentafluoro-ethane. + The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere, i.e. summed over the atmospheric column and over the entire globe. The chemical formula of CFC115 is CClF2CF3. The IUPAC name for CFC115 is 1-chloro-1,1,2,2,2-pentafluoroethane. mol - The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere, i.e. summed over the atmospheric column and over the entire globe. The chemical formula for CFC12 is CF2Cl2. The IUPAC name for CFC12 is dichloro-difluoro-methane. + The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere, i.e. summed over the atmospheric column and over the entire globe. The chemical formula for CFC12 is CF2Cl2. The IUPAC name for CFC12 is dichloro(difluoro)methane. @@ -1642,7 +1747,7 @@ mol - The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere, i.e. summed over the atmospheric column and over the entire globe. "Clox" describes a family of chemical species consisting of inorganic chlorine compounds with the exception of hydrogen chloride (HCl) and chlorine nitrate (ClONO2). "Clox" is the term used in standard names for all species belonging to the family that are represented within a given model. The list of individual species that are included in a quantity with a group chemical standard name can vary between models. Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute. 
"Inorganic chlorine", sometimes referred to as Cly, describes a family of chemical species which result from the degradation of source gases containing chlorine (CFCs, HCFCs, VSLS) and natural inorganic chlorine sources such as seasalt and other aerosols. Standard names that use the term "inorganic_chlorine" are used for quantities that contain all inorganic chlorine species including HCl and ClONO2. The phrase 'expressed_as' is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. + The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere, i.e. summed over the atmospheric column and over the entire globe. "Clox" describes a family of chemical species consisting of inorganic chlorine compounds with the exception of hydrogen chloride (HCl) and chlorine nitrate (ClONO2). The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. "Clox" is the term used in standard names for all species belonging to the family that are represented within a given model. The list of individual species that are included in a quantity with a group chemical standard name can vary between models. Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute. "Inorganic chlorine", sometimes referred to as Cly, describes a family of chemical species which result from the degradation of source gases containing chlorine (CFCs, HCFCs, VSLS) and natural inorganic chlorine sources such as sea salt and other aerosols. Standard names that use the term "inorganic_chlorine" are used for quantities that contain all inorganic chlorine species including HCl and ClONO2. @@ -1726,35 +1831,35 @@ mol - The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere, i.e. summed over the atmospheric column and over the entire globe. The chemical formula for halon1202 is CBr2F2. The IUPAC name for halon1202 is dibromo-difluoro-methane. + The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere, i.e. summed over the atmospheric column and over the entire globe. The chemical formula for Halon1202 is CBr2F2. The IUPAC name for Halon1202 is dibromo(difluoro)methane. mol - The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere, i.e. summed over the atmospheric column and over the entire globe. The chemical formula for halon1211 is CBrClF2. The IUPAC name for halon1211 is bromo-chloro-difluoro-methane. + The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere, i.e. summed over the atmospheric column and over the entire globe. The chemical formula for Halon1211 is CBrClF2. The IUPAC name for Halon1211 is bromo-chloro-difluoromethane. mol - The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere, i.e. summed over the atmospheric column and over the entire globe. The chemical formula for halon1301 is CBrF3. The IUPAC name for halon1301 is bromo-trifluoro-methane. 
+ The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere, i.e. summed over the atmospheric column and over the entire globe. The chemical formula for Halon1301 is CBrF3. The IUPAC name for Halon1301 is bromo(trifluoro)methane. mol - The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere, i.e. summed over the atmospheric column and over the entire globe. The chemical formula for halo2402 is C2Br2F4. The IUPAC name for halon2402 is 1,2-dibromo-1,1,2,2-tetrafluoro-ethane. + The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere, i.e. summed over the atmospheric column and over the entire globe. The chemical formula for Halon2402 is C2Br2F4. The IUPAC name for Halon2402 is 1,2-dibromo-1,1,2,2-tetrafluoroethane. mol - The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere, i.e. summed over the atmospheric column and over the entire globe. The chemical formula for hcc140a is CH3CCl3. The IUPAC name for hcc140a is 1,1,1-trichloro-ethane. + The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere, i.e. summed over the atmospheric column and over the entire globe. The chemical formula for HCC140a, also called methyl chloroform, is CH3CCl3. The IUPAC name for HCC140a is 1,1,1-trichloroethane. @@ -1775,7 +1880,7 @@ mol - The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere, i.e. summed over the atmospheric column and over the entire globe. The chemical formula for HCFC22 is CHClF2. The IUPAC name for HCFC22 is chloro-difluoro-methane. + The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere, i.e. summed over the atmospheric column and over the entire globe. The chemical formula for HCFC22 is CHClF2. The IUPAC name for HCFC22 is chloro(difluoro)methane. @@ -1824,14 +1929,14 @@ mol - The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere, i.e. summed over the atmospheric column and over the entire globe. The chemical formula for the hydroperoxyl radical is HO2. In chemistry, a 'radical' is a highly reactive, and therefore shortlived, species. + The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere, i.e. summed over the atmospheric column and over the entire globe. The chemical formula for the hydroperoxyl radical is HO2. In chemistry, a "radical" is a highly reactive, and therefore short lived, species. mol - The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere, i.e. summed over the atmospheric column and over the entire globe. The chemical formula for the hydroxyl radical is OH. In chemistry, a 'radical' is a highly reactive, and therefore shortlived, species. + The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere, i.e. summed over the atmospheric column and over the entire globe. The chemical formula for the hydroxyl radical is OH. In chemistry, a "radical" is a highly reactive, and therefore short lived, species. @@ -1852,28 +1957,28 @@ mol - The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere, i.e. summed over the atmospheric column and over the entire globe. 
"Inorganic bromine", sometimes referred to as Bry, describes a family of chemical species which result from the degradation of source gases containing bromine (halons, methyl bromide, VSLS) and natural inorganic bromine sources such as volcanoes, seasalt and other aerosols. "Inorganic bromine" is the term used in standard names for all species belonging to the family that are represented within a given model. The list of individual species that are included in a quantity having a group chemical standard name can vary between models. Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute. Standard names that use the term "brox" are used for quantities that contain all inorganic bromine species except HBr and BrONO2. + The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere, i.e. summed over the atmospheric column and over the entire globe. "Inorganic bromine", sometimes referred to as Bry, describes a family of chemical species which result from the degradation of source gases containing bromine (halons, methyl bromide, VSLS) and natural inorganic bromine sources such as volcanoes, sea salt and other aerosols. "Inorganic bromine" is the term used in standard names for all species belonging to the family that are represented within a given model. The list of individual species that are included in a quantity having a group chemical standard name can vary between models. Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute. Standard names that use the term "brox" are used for quantities that contain all inorganic bromine species except HBr and BrONO2. mol - The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere, i.e. summed over the atmospheric column and over the entire globe. "Inorganic chlorine", sometimes referred to as Cly, describes a family of chemical species which result from the degradation of source gases containing chlorine (CFCs, HCFCs, VSLS) and natural inorganic chlorine sources such as seasalt and other aerosols. "Inorganic chlorine" is the term used in standard names for all species belonging to the family that are represented within a given model. The list of individual species that are included in a quantity having a group chemical standard name can vary between models. Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute. Standard names that use the term "clox" are used for quantities that contain all inorganic chlorine species except HCl and ClONO2. + The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere, i.e. summed over the atmospheric column and over the entire globe. "Inorganic chlorine", sometimes referred to as Cly, describes a family of chemical species which result from the degradation of source gases containing chlorine (CFCs, HCFCs, VSLS) and natural inorganic chlorine sources such as sea salt and other aerosols. "Inorganic chlorine" is the term used in standard names for all species belonging to the family that are represented within a given model. The list of individual species that are included in a quantity having a group chemical standard name can vary between models. 
Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute. Standard names that use the term "clox" are used for quantities that contain all inorganic chlorine species except HCl and ClONO2. mol - The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere, i.e. summed over the atmospheric column and over the entire globe. The chemical formula for isoprene is CH2=C(CH3)CH=CH2. The IUPAC name for isoprene is 2-methyl-buta-1,3-diene. Isoprene is a member of the group of hydrocarbons known as terpenes. There are standard names for the terpene group as well as for some of the individual species. + The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere, i.e. summed over the atmospheric column and over the entire globe. The chemical formula for isoprene is CH2=C(CH3)CH=CH2. The IUPAC name for isoprene is 2-methylbuta-1,3-diene. Isoprene is a member of the group of hydrocarbons known as terpenes. There are standard names for the terpene group as well as for some of the individual species. mol - The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere, i.e. summed over the atmospheric column and over the entire globe. The chemical formula for limonene is C10H16. The IUPAC name for limonene is 1-methyl-4-prop-1-en-2-yl-cyclohexene. Limonene is a member of the group of hydrocarbons known as terpenes. There are standard names for the terpene group as well as for some of the individual species. + The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere, i.e. summed over the atmospheric column and over the entire globe. The chemical formula for limonene is C10H16. The IUPAC name for limonene is 1-methyl-4-prop-1-en-2-ylcyclohexene. Limonene is a member of the group of hydrocarbons known as terpenes. There are standard names for the terpene group as well as for some of the individual species. @@ -1915,7 +2020,7 @@ mol - The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere, i.e. summed over the atmospheric column and over the entire globe. The chemical formula for methyl_peroxy_radical is CH3O2. In chemistry, a 'radical' is a highly reactive, and therefore shortlived, species. + The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere, i.e. summed over the atmospheric column and over the entire globe. The chemical formula for methyl_peroxy_radical is CH3O2. In chemistry, a "radical" is a highly reactive, and therefore short lived, species. @@ -1929,7 +2034,7 @@ mol - The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere, i.e. summed over the atmospheric column and over the entire globe. In chemistry, a 'radical' is a highly reactive, and therefore shortlived, species. + The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere, i.e. summed over the atmospheric column and over the entire globe. In chemistry, a "radical" is a highly reactive, and therefore short lived, species. @@ -2048,7 +2153,7 @@ mol - The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere, i.e. summed over the atmospheric column and over the entire globe. The chemical formula for toluene is C6H5CH3. 
Toluene has the same structure as benzene, except that one of the hydrogen atoms is replaced by a methyl group. The systematic name for toluene is methylbenzene. + The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere, i.e. summed over the atmospheric column and over the entire globe. The chemical formula for toluene is C6H5CH3. Toluene has the same structure as benzene, except that one of the hydrogen atoms is replaced by a methyl group. The IUPAC name for toluene is methylbenzene. @@ -2135,6 +2240,13 @@ "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. + + m + + + The height in the atmosphere, L, at which buoyant production or destruction of turbulent energy balances the shear production of turbulent kinetic energy: L = -u*^3 / (k B0), where u* is the wind frictional velocity, k is the von Karman constant, and B0 is the atmospheric surface buoyancy flux. If the buoyancy flux is destabilizing, L is negative. + + 1 @@ -2254,55 +2366,39 @@ "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. Potential energy is the sum of the gravitational potential energy relative to the geoid and the centripetal potential energy. (The geopotential is the specific potential energy.) - - s-1 - 43 E138 - - Relative vorticity is the upward component of the vorticity vector i.e. the component which arises from horizontal velocity. - - 1 - See Appendix D of the CF convention for information about dimensionless vertical coordinates. + See Appendix D of the CF convention for information about parametric vertical coordinates. 1 - See Appendix D of the CF convention for information about dimensionless vertical coordinates. + See Appendix D of the CF convention for information about parametric vertical coordinates. K - The atmosphere_stability_k_index is an index that indicates the potential of severe convection and is often referred to a simply the k index. The index is derived from the difference in air temperature between 850 and 500 hPa, the dew point temperature at 850 hPa, and the difference between the air temperature and the dew point temperature at -700 hPa. - + The atmosphere_stability_k_index is an index that indicates the potential of severe convection and is often referred to as simply the k index. The index is calculated as A + B - C, where A is the difference in air temperature between 850 and 500 hPa, B is the dew point temperature at 850 hPa, and C is the dew point depression (i.e. the amount by which the air temperature exceeds its dew point temperature) at 700 hPa. It is strongly recommended that a variable with this standard name should have a units_metadata attribute, with one of the values "on-scale" or "difference", whichever is appropriate for the data, because it is essential to know whether the temperature is on-scale (meaning relative to the origin of the scale indicated by the units) or refers to temperature differences (implying that the origin of the temperature scale is irrelevant), in order to convert the units correctly (cf.
https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). K - The atmosphere_stability_showalter_index is an index used to determine convective and thunderstorm potential and is often referred to as simply the showalter index. The index is defined as the temperature difference between a parcel of air lifted from 850 to 500 hPa (wet adiabatically) and the ambient air temperature at 500 hPa. - + The atmosphere_stability_showalter_index is an index used to determine convective and thunderstorm potential and is often referred to as simply the showalter index. The index is defined as the temperature difference between a parcel of air lifted from 850 to 500 hPa (wet adiabatically) and the ambient air temperature at 500 hPa. It is strongly recommended that a variable with this standard name should have the attribute units_metadata="temperature: difference", meaning that it refers to temperature differences and implying that the origin of the temperature scale is irrelevant, because it is essential to know whether a temperature is on-scale or a difference in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). K - The atmosphere_stability_total_totals_index indicates the -likelihood of severe convection and is often referred to as simply the -total totals index. The index is derived from the difference in air -temperature between 850 and 500 hPa (the vertical totals) and the -difference between the dew point temperature at 850 hPa and the air -temperature at 500 hPa (the cross totals). The vertical totals and cross -totals are summed to obtain the index. + The atmosphere_stability_total_totals_index indicates the likelihood of severe convection and is often referred to as simply the total totals index. The index is derived from the difference in air temperature between 850 and 500 hPa (the vertical totals) and the difference between the dew point temperature at 850 hPa and the air temperature at 500 hPa (the cross totals). The vertical totals and cross totals are summed to obtain the index. It is strongly recommended that a variable with this standard name should have the attribute units_metadata="temperature: difference", meaning that it refers to temperature differences and implying that the origin of the temperature scale is irrelevant, because it is essential to know whether a temperature is on-scale or a difference in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). @@ -2316,7 +2412,42 @@ totals are summed to obtain the index. kg m-2 s-1 - In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. The atmosphere convective mass flux is the vertical transport of mass for a field of cumulus clouds or thermals, given by the product of air density and vertical velocity. For an area-average, cell_methods should specify whether the average is over all the area or the area of updrafts and/or downdrafts only. "Updraft" means that the flux is positive in the updward direction (negative downward). + The atmosphere convective mass flux is the vertical transport of mass for a field of cumulus clouds or thermals, given by the product of air density and vertical velocity. For an area-average, cell_methods should specify whether the average is over all the area or the area of updrafts and/or downdrafts only.
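The Obukhov-length and stability-index definitions above are closed-form arithmetic, so they translate directly into code. A minimal sketch; the function names and sample values are illustrative, not from any library:

VON_KARMAN = 0.4  # von Karman constant (dimensionless)

def obukhov_length(friction_velocity, buoyancy_flux):
    """L = -u*^3 / (k * B0); negative when the buoyancy flux is destabilizing."""
    return -friction_velocity**3 / (VON_KARMAN * buoyancy_flux)

def k_index(t850, t500, t700, td850, td700):
    """K index = A + B - C: the 850-500 hPa temperature difference, plus the
    850 hPa dew point, minus the 700 hPa dew point depression."""
    return (t850 - t500) + td850 - (t700 - td700)

def total_totals_index(t850, t500, td850):
    """Total totals = vertical totals (T850 - T500) + cross totals (Td850 - T500)."""
    return (t850 - t500) + (td850 - t500)

# Illustrative values (temperatures in degC, u* in m s-1, B0 in m2 s-3):
print(obukhov_length(0.3, 2.5e-3))                                       # -> -27.0 m
print(k_index(t850=20.0, t500=-12.0, t700=8.0, td850=15.0, td700=2.0))   # -> 41.0
print(total_totals_index(t850=20.0, t500=-12.0, td850=15.0))             # -> 59.0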
"Updraft" means that the flux is positive in the updward direction (negative downward). upward. In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. + + + + s-1 + 41 + + Atmosphere upward absolute vorticity is the sum of the atmosphere upward relative vorticity and the vertical component of vorticity due to the Earth’s rotation. In contrast, the quantity with standard name atmosphere_upward_relative_vorticity excludes the Earth's rotation. Vorticity is a vector quantity. "Upward" indicates a vector component which is positive when directed upward (negative downward). A positive value of atmosphere_upward_absolute_vorticity indicates anticlockwise rotation when viewed from above. + + + + s-1 + 43 E138 + + Atmosphere upward relative vorticity is the vertical component of the 3D air vorticity vector. The vertical component arises from horizontal velocity only. "Relative" in this context means the vorticity of the air relative to the rotating solid earth reference frame, i.e. excluding the Earth's own rotation. In contrast, the quantity with standard name atmosphere_upward_absolute_vorticity includes the Earth's rotation. "Upward" indicates a vector component which is positive when directed upward (negative downward). A positive value of atmosphere_upward_relative_vorticity indicates anticlockwise rotation when viewed from above. + + + + s-1 + + + Atmosphere x relative vorticity is the x component of the 3D air vorticity vector. "Relative" in this context means the vorticity of the air relative to the rotating solid earth reference frame, i.e. excluding the Earth's own rotation. "x" indicates a vector component along the grid x-axis, positive with increasing x. A positive value of atmosphere_x_relative_vorticity indicates anticlockwise rotation when viewed by an observer looking along the axis in the direction of decreasing x, i.e. consistent with the "right hand screw" rule. + + + + s-1 + + + Atmosphere y relative vorticity is the y component of the 3D air vorticity vector. "Relative" in this context means the vorticity of the air relative to the rotating solid earth reference frame, i.e. excluding the Earth's own rotation. "y" indicates a vector component along the grid y-axis, positive with increasing y. A positive value of atmosphere_y_relative_vorticity indicates anticlockwise rotation when viewed by an observer looking along the axis in the direction of decreasing y, i.e. consistent with the "right hand screw" rule. + + + + 1 + + + A quality flag that reports the result of the Attenuated Signal test, which checks for near-flat-line conditions using a range or standard deviation. The linkage between the data variable and this variable is achieved using the ancillary_variables attribute. There are standard names for other specific quality tests which take the form of X_quality_flag. Quality information that does not match any of the specific quantities should be given the more general standard name of quality_flag. @@ -2326,11 +2457,11 @@ totals are summed to obtain the index. The Automated Tropical Cyclone Forecasting System (ATCF) storm identifier is an 8 character string which identifies a tropical cyclone. 
The storm identifier has the form BBCCYYYY, where BB is the ocean basin, specifically: AL - North Atlantic basin, north of the Equator; SL - South Atlantic basin, south of the Equator; EP - North East Pacific basin, eastward of 140 degrees west longitude; CP - North Central Pacific basin, between the dateline and 140 degrees west longitude; WP - North West Pacific basin, westward of the dateline; IO - North Indian Ocean basin, north of the Equator between 40 and 100 degrees east longitude; SH - South Pacific Ocean basin and South Indian Ocean basin. CC is the cyclone number. Numbers 01 through 49 are reserved for tropical and subtropical cyclones. A cyclone number is assigned to each tropical or subtropical cyclone in each basin as it develops. Numbers are assigned in chronological order. Numbers 50 through 79 are reserved for internal use by operational forecast centers. Numbers 80 through 89 are reserved for training, exercises and testing. Numbers 90 through 99 are reserved for tropical disturbances having the potential to become tropical or subtropical cyclones. The 90's are assigned sequentially and reused throughout the calendar year. YYYY is the four-digit year. This is calendar year for the northern hemisphere. For the southern hemisphere, the year begins July 1, with calendar year plus one. Reference: Miller, R.J., Schrader, A.J., Sampson, C.R., & Tsui, T.L. (1990), The Automated Tropical Cyclone Forecasting System (ATCF), American Meteorological Society Computer Techniques, 5, 653 - 660. - + 1 - Scattering of radiation is its deflection from its incident path without loss of energy. Backwards scattering refers to the sum of scattering into all backward angles i.e. scattering_angle exceeding pi/2 radians. A scattering_angle should not be specified with this quantity. "Backscattering ratio" is the ratio of the quantity with standard name volume_attenuated_backwards_scattering_function_in_air to the quantity with standard name volume_attenuated_backwards_scattering_function_in_air_assuming_no_aerosol_or_cloud. + Scattering of radiation is its deflection from its incident path without loss of energy. Backwards scattering refers to the sum of scattering into all backward angles i.e. scattering_angle exceeding pi/2 radians. A scattering_angle should not be specified with this quantity. "Backscattering ratio" is the ratio of the quantity with standard name volume_attenuated_backwards_scattering_function_in_air to the quantity with standard name volume_attenuated_backwards_scattering_function_in_air_assuming_no_aerosol_or_cloud. @@ -2396,6 +2527,13 @@ totals are summed to obtain the index. "Baseflow" is subsurface runoff which takes place below the level of the water table. Runoff is the liquid water which drains from land. "Amount" means mass per unit area. + + 1 + + + The "beam_consistency_indicator" is the degree to which the received acoustic pulse is correlated with the transmitted pulse. It is used as a data quality assessment parameter in ADCP (acoustic doppler current profiler) instruments and is frequently referred to as "correlation magnitude". Convention is that the larger the value, the higher the signal to noise ratio and therefore the better the quality of the current vector measurements; the maximum value of the indicator is 128. + + 1 @@ -2407,14 +2545,35 @@ totals are summed to obtain the index. m - Altitude is the (geometric) height above the geoid, which is the reference geopotential surface. The geoid is similar to mean sea level. 
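Because the ATCF identifier described above is a fixed-width BBCCYYYY string, unpacking it is mechanical. A minimal sketch of a parser (a hypothetical helper, not part of the ATCF system or any library):

ATCF_BASINS = {
    "AL": "North Atlantic", "SL": "South Atlantic",
    "EP": "North East Pacific", "CP": "North Central Pacific",
    "WP": "North West Pacific", "IO": "North Indian Ocean",
    "SH": "South Pacific and South Indian Ocean",
}

def parse_atcf_id(storm_id):
    """Split an 8-character ATCF identifier BBCCYYYY into basin, number, year."""
    if len(storm_id) != 8:
        raise ValueError("ATCF identifier must be exactly 8 characters")
    basin = storm_id[:2]
    if basin not in ATCF_BASINS:
        raise ValueError(f"unknown ATCF basin code: {basin!r}")
    return {
        "basin": ATCF_BASINS[basin],
        "cyclone_number": int(storm_id[2:4]),  # 01-49: tropical/subtropical
        "year": int(storm_id[4:]),             # southern hemisphere: season year
    }

# Illustrative identifier for the ninth North Atlantic cyclone of 2011:
print(parse_atcf_id("AL092011"))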
"Bedrock" is the solid Earth surface beneath land ice or ocean water. + Altitude is the (geometric) height above the geoid, which is the reference geopotential surface. The geoid is similar to mean sea level. "Bedrock" is the solid Earth surface beneath land ice, ocean water or soil. m - The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. Altitude is the (geometric) height above the geoid, which is the reference geopotential surface. The geoid is similar to mean sea level. "Bedrock" is the solid Earth surface beneath land ice or ocean water. The zero of bedrock altitude change is arbitrary. Isostatic adjustment is the vertical movement of the lithosphere due to changing surface ice and water loads. + The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. Altitude is the (geometric) height above the geoid, which is the reference geopotential surface. The geoid is similar to mean sea level. "Bedrock" is the solid Earth surface beneath land ice, ocean water or soil. The zero of bedrock altitude change is arbitrary. Isostatic adjustment is the vertical movement of the lithosphere due to changing surface ice and water loads. + + + + m + + + The bedrock_depth_below_ground_level is the vertical distance between the ground and the bedrock. "Bedrock" refers to the surface of the consolidated rock, beneath any unconsolidated rock, sediment, soil, water or land ice. "Ground level" means the level of the solid surface in land areas without permanent inland water, beneath any snow, ice or surface water. + + + + + + + "Biological taxon" is a name or other label identifying an organism or a group of organisms as belonging to a unit of classification in a hierarchical taxonomy. The quantity with standard name biological_taxon_lsid is the machine-readable identifier based on a taxon registration system using the syntax convention specified for the Life Science Identifier (LSID) - urn:lsid:<Authority>:<Namespace>:<ObjectID>[:<Version>]. This includes the reference classification in the element and these are restricted by the LSID governance. It is strongly recommended in CF that the authority chosen is World Register of Marine Species (WoRMS) for oceanographic data and Integrated Taxonomic Information System (ITIS) for freshwater and terrestrial data. See Section 6.1.2 of the CF convention (version 1.8 or later) for information about biological taxon auxiliary coordinate variables. This identifier is a narrower equivalent to the scientificNameID field in the Darwin Core Standard. + + + + + + + "Biological taxon" is a name or other label identifying an organism or a group of organisms as belonging to a unit of classification in a hierarchical taxonomy. The quantity with standard name biological_taxon_name is the human-readable label for the taxon such as Calanus finmarchicus. The label should be registered in either WoRMS (http://www.marinespecies.org) or ITIS (https://www.itis.gov/) and spelled exactly as registered. See Section 6.1.2 of the CF convention (version 1.8 or later) for information about biological taxon auxiliary coordinate variables. @@ -2435,21 +2594,21 @@ totals are summed to obtain the index. 
K 118 - The brightness temperature of a body is the temperature of a black body which radiates the same power per unit solid angle per unit area. + The brightness temperature of a body is the temperature of a black body which radiates the same power per unit solid angle per unit area. It is strongly recommended that a variable with this standard name should have a units_metadata attribute, with one of the values "on-scale" or "difference", whichever is appropriate for the data, because it is essential to know whether the temperature is on-scale (meaning relative to the origin of the scale indicated by the units) or refers to temperature differences (implying that the origin of the temperature scale is irrelevant), in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). K - The brightness temperature of a body is the temperature of a black body which radiates the same power per unit solid angle per unit area. "anomaly" means difference from climatology. + The brightness temperature of a body is the temperature of a black body which radiates the same power per unit solid angle per unit area. "anomaly" means difference from climatology. It is strongly recommended that a variable with this standard name should have the attribute units_metadata="temperature: difference", meaning that it refers to temperature differences and implying that the origin of the temperature scale is irrelevant, because it is essential to know whether a temperature is on-scale or a difference in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). K - cloud_top refers to the top of the highest cloud. brightness_temperature of a body is the temperature of a black body which radiates the same power per unit solid angle per unit area. A coordinate variable of radiation_wavelength, sensor_band_central_radiation_wavelength, or radiation_frequency may be specified to indicate that the brightness temperature applies at specific wavelengths or frequencies. + cloud_top refers to the top of the highest cloud. brightness_temperature of a body is the temperature of a black body which radiates the same power per unit solid angle per unit area. A coordinate variable of radiation_wavelength, sensor_band_central_radiation_wavelength, or radiation_frequency may be specified to indicate that the brightness temperature applies at specific wavelengths or frequencies. It is strongly recommended that a variable with this standard name should have a units_metadata attribute, with one of the values "on-scale" or "difference", whichever is appropriate for the data, because it is essential to know whether the temperature is on-scale (meaning relative to the origin of the scale indicated by the units) or refers to temperature differences (implying that the origin of the temperature scale is irrelevant), in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). @@ -2470,7 +2629,14 @@ totals are summed to obtain the index. 1 - "X_area_fraction" means the fraction of horizontal area occupied by X. "X_area" means the horizontal area occupied by X within the grid cell. "Burned area" means the area of burned vegetation. + "Area fraction" is the fraction of a grid cell's horizontal area that has some characteristic of interest. It is evaluated as the area of interest divided by the grid cell area.
It may be expressed as a fraction, a percentage, or any other dimensionless representation of a fraction. "Burned area" means the area of burned vegetation. + + + + 1 + + + The Canadian Fire Weather Index (CFWI) is a numerical rating of potential frontal fire intensity from the Canadian Forest Fire Index System. It indicates fire intensity by combining the rate of spread with the amount of fuel being consumed and is also used for general public information about fire danger conditions. It is a function of wind speed, temperature, relative humidity, and precipitation. The calculation accounts for multiple layers of flammable material on the ground as well as fine fuels above the surface, combined with the expected rate of spread of fire. The index is open ended. @@ -2512,14 +2678,14 @@ totals are summed to obtain the index. K - "Canopy temperature" is the bulk temperature of the canopy, not the surface (skin) temperature. "Canopy" means the vegetative covering over a surface. The canopy is often considered to be the outer surfaces of the vegetation. Plant height and the distribution, orientation and shape of plant leaves within a canopy influence the atmospheric environment and many plant processes within the canopy. Reference: AMS Glossary http://glossary.ametsoc.org/wiki/Canopy. + "Canopy temperature" is the bulk temperature of the canopy, not the surface (skin) temperature. "Canopy" means the vegetative covering over a surface. The canopy is often considered to be the outer surfaces of the vegetation. Plant height and the distribution, orientation and shape of plant leaves within a canopy influence the atmospheric environment and many plant processes within the canopy. Reference: AMS Glossary http://glossary.ametsoc.org/wiki/Canopy. It is strongly recommended that a variable with this standard name should have a units_metadata attribute, with one of the values "on-scale" or "difference", whichever is appropriate for the data, because it is essential to know whether the temperature is on-scale (meaning relative to the origin of the scale indicated by the units) or refers to temperature differences (implying that the origin of the temperature scale is irrelevant), in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). kg m-2 s-1 - "Canopy" means the vegetative covering over a surface. The canopy is often considered to be the outer surfaces of the vegetation. Plant height and the distribution, orientation and shape of plant leaves within a canopy influence the atmospheric environment and many plant processes within the canopy. Reference: AMS Glossary http://glossary.ametsoc.org/wiki/Canopy. "Throughfall" is the part of the precipitation flux that reaches the ground directly through the vegetative canopy, through intershrub spaces in the canopy, and as drip from the leaves, twigs, and stems (but not including snowmelt). In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. + "Canopy" means the vegetative covering over a surface. The canopy is often considered to be the outer surfaces of the vegetation. Plant height and the distribution, orientation and shape of plant leaves within a canopy influence the atmospheric environment and many plant processes within the canopy. Reference: AMS Glossary http://glossary.ametsoc.org/wiki/Canopy.
"Throughfall" is the part of the precipitation flux that reaches the ground directly through the vegetative canopy, through spaces in the canopy, and as drip from the leaves, twigs, and stems (but not including snowmelt). In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. @@ -2543,7 +2709,7 @@ totals are summed to obtain the index. In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Anthropogenic" means influenced, caused, or created by human activity. Examples of "forestry and agricultural products" are paper, cardboard, furniture, timber for construction, biofuels and food for both humans and livestock. Models that simulate land use changes have one or more pools of carbon that represent these products in order to conserve carbon and allow its eventual release into the atmosphere, for example, when the products decompose in landfill sites. "Anthropogenic land use change" means human changes to land, excluding forest regrowth. It includes fires ignited by humans for the purpose of land use change and the processes of eventual disposal and decomposition of wood products such as paper, cardboard, furniture and timber for construction. - + kg m-2 s-1 @@ -2564,6 +2730,13 @@ totals are summed to obtain the index. "Vegetation" means any plants e.g. trees, shrubs, grass. "Litter" is dead plant material in or above the soil. In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. + + kg s-1 + + + The amount of total carbon mass transported in the river channels from land into the ocean. This quantity can be provided at a certain location within the river network and floodplain (over land) or at the river mouth (over ocean) where the river enters the ocean. "River" refers to water in the fluvial system (stream and floodplain). + + m2 @@ -2582,14 +2755,14 @@ totals are summed to obtain the index. J m-2 - The surface called "surface" means the lower boundary of the atmosphere. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "wrt" means with respect to. "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. "Atmosphere energy content" has not yet been precisely defined! Please express your views on this quantity on the CF email list. See Appendix D of the CF convention for information about dimensionless vertical coordinates. + The surface called "surface" means the lower boundary of the atmosphere. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "wrt" means with respect to. "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. 
For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. "Atmosphere energy content" has not yet been precisely defined! Please express your views on this quantity on the CF email list. See Appendix D of the CF convention for information about parametric vertical coordinates. J m-2 - The surface called "surface" means the lower boundary of the atmosphere. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "wrt" means with respect to. "Content" indicates a quantity per unit area. "Layer" means any layer with upper and lower boundaries that have constant values in some vertical coordinate. There must be a vertical coordinate variable indicating the extent of the layer(s). If the layers are model layers, the vertical coordinate can be model_level_number, but it is recommended to specify a physical coordinate (in a scalar or auxiliary coordinate variable) as well. See Appendix D of the CF convention for information about dimensionless vertical coordinates. + "Content" indicates a quantity per unit area. "Layer" means any layer with upper and lower boundaries that have constant values in some vertical coordinate. There must be a vertical coordinate variable indicating the extent of the layer(s). If the layers are model layers, the vertical coordinate can be model_level_number, but it is recommended to specify a physical coordinate (in a scalar or auxiliary coordinate variable) as well. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. The abbreviation "wrt" means with respect to. The surface called "surface" means the lower boundary of the atmosphere. See Appendix D of the CF convention for information about parametric vertical coordinates. @@ -2599,6 +2772,13 @@ totals are summed to obtain the index. "Amount" means mass per unit area. Zero change in land ice amount is an arbitrary level. "Land ice" means glaciers, ice-caps and ice-sheets resting on bedrock and also includes ice-shelves. + + kg + + + Zero change in land ice mass is an arbitrary level. "Land ice" means glaciers, ice-caps and ice-sheets resting on bedrock and also includes ice-shelves. The horizontal domain over which the quantity is calculated is described by the associated coordinate variables and coordinate bounds or by a coordinate variable or scalar coordinate variable with the standard name of "region" supplied according to section 6.1.1 of the CF conventions. + + kg m-2 @@ -2666,7 +2846,7 @@ totals are summed to obtain the index. K - "change_over_time_in_X" means change in a quantity X over a time-interval, which should be defined by the bounds of the time coordinate. Conservative Temperature is defined as part of the Thermodynamic Equation of Seawater 2010 (TEOS-10) which was adopted in 2010 by the International Oceanographic Commission (IOC). Conservative Temperature is specific potential enthalpy (which has the standard name sea_water_specific_potential_enthalpy) divided by a fixed value of the specific heat capacity of sea water, namely cp_0 = 3991.86795711963 J kg-1 K-1. Conservative Temperature is a more accurate measure of the "heat content" of sea water, by a factor of one hundred, than is potential temperature. 
Because of this, it can be regarded as being proportional to the heat content of sea water per unit mass. Reference: www.teos-10.org; McDougall, 2003 doi: 10.1175/1520-0485(2003)033<0945:PEACOV>2.0.CO;2. + The phrase "change_over_time_in_X" means change in a quantity X over a time-interval, which should be defined by the bounds of the time coordinate. Conservative Temperature is defined as part of the Thermodynamic Equation of Seawater 2010 (TEOS-10) which was adopted in 2010 by the International Oceanographic Commission (IOC). Conservative Temperature is specific potential enthalpy (which has the standard name sea_water_specific_potential_enthalpy) divided by a fixed value of the specific heat capacity of sea water, namely cp_0 = 3991.86795711963 J kg-1 K-1. Conservative Temperature is a more accurate measure of the "heat content" of sea water, by a factor of one hundred, than is potential temperature. Because of this, it can be regarded as being proportional to the heat content of sea water per unit mass. Reference: www.teos-10.org; McDougall, 2003 doi: 10.1175/1520-0485(2003)033<0945:PEACOV>2.0.CO;2. It is strongly recommended that a variable with this standard name should have the attribute units_metadata="temperature: difference", meaning that it refers to temperature differences and implying that the origin of the temperature scale is irrelevant, because it is essential to know whether a temperature is on-scale or a difference in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). @@ -2687,21 +2867,21 @@ totals are summed to obtain the index. kg m-3 - Potential density is the density a parcel of air or sea water would have if moved adiabatically to a reference pressure, by default assumed to be sea level pressure. For sea water potential density, if 1000 kg m-3 is subtracted, the standard name "sea_water_sigma_theta" should be chosen instead. "change_over_time_in_X" means change in a quantity X over a time-interval, which should be defined by the bounds of the time coordinate. + The phrase "change_over_time_in_X" means change in a quantity X over a time interval, which should be defined by the bounds of the time coordinate. Sea water potential density is the density a parcel of sea water would have if moved adiabatically to a reference pressure, by default assumed to be sea level pressure. To specify the reference pressure to which the quantity applies, provide a scalar coordinate variable with standard name reference_pressure. The density of a substance is its mass per unit volume. For sea water potential density, if 1000 kg m-3 is subtracted, the standard name "sea_water_sigma_theta" should be chosen instead. K - Potential temperature is the temperature a parcel of air or sea water would have if moved adiabatically to sea level pressure. "change_over_time_in_X" means change in a quantity X over a time-interval, which should be defined by the bounds of the time coordinate. + Potential temperature is the temperature a parcel of air or sea water would have if moved adiabatically to sea level pressure. The phrase "change_over_time_in_X" means change in a quantity X over a time-interval, which should be defined by the bounds of the time coordinate. 
It is strongly recommended that a variable with this standard name should have the attribute units_metadata="temperature: difference", meaning that it refers to temperature differences and implying that the origin of the temperature scale is irrelevant, because it is essential to know whether a temperature is on-scale or a difference in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). 1 - "change_over_time_in_X" means change in a quantity X over a time-interval, which should be defined by the bounds of the time coordinate. Practical Salinity, S_P, is a determination of the salinity of sea water, based on its electrical conductance. The measured conductance, corrected for temperature and pressure, is compared to the conductance of a standard potassium chloride solution, producing a value on the Practical Salinity Scale of 1978 (PSS-78). This name should not be used to describe salinity observations made before 1978, or ones not based on conductance measurements. Conversion of Practical Salinity to other precisely defined salinity measures should use the appropriate formulas specified by TEOS-10. Other standard names for precisely defined salinity quantities are sea_water_absolute_salinity (S_A); sea_water_preformed_salinity (S_*), sea_water_reference_salinity (S_R); sea_water_cox_salinity (S_C), used for salinity observations between 1967 and 1977; and sea_water_knudsen_salinity (S_K), used for salinity observations between 1901 and 1966. Salinity quantities that do not match any of the precise definitions shoul d be given the more general standard name of sea_water_salinity. Reference: www.teos-10.org; Lewis, 1980 doi:10.1109/JOE.1980.1145448. + The phrase "change_over_time_in_X" means change in a quantity X over a time interval, which should be defined by the bounds of the time coordinate. Practical Salinity, S_P, is a determination of the salinity of sea water, based on its electrical conductance. The measured conductance, corrected for temperature and pressure, is compared to the conductance of a standard potassium chloride solution, producing a value on the Practical Salinity Scale of 1978 (PSS-78). This name should not be used to describe salinity observations made before 1978, or ones not based on conductance measurements. Conversion of Practical Salinity to other precisely defined salinity measures should use the appropriate formulas specified by TEOS-10. Other standard names for precisely defined salinity quantities are sea_water_absolute_salinity (S_A); sea_water_preformed_salinity (S_*), sea_water_reference_salinity (S_R); sea_water_cox_salinity (S_C), used for salinity observations between 1967 and 1977; and sea_water_knudsen_salinity (S_K), used for salinity observations between 1901 and 1966. Salinity quantities that do not match any of the precise definitions should be given the more general standard name of sea_water_salinity. Reference: www.teos-10.org; Lewis, 1980 doi:10.1109/JOE.1980.1145448. @@ -2729,14 +2909,14 @@ totals are summed to obtain the index. K - "change_over_time_in_X" means change in a quantity X over a time-interval, which should be defined by the bounds of the time coordinate.Sea water temperature is the in situ temperature of the sea water. To specify the depth at which the temperature applies use a vertical coordinate variable or scalar coordinate variable. 
There are standard names for sea_surface_temperature, sea_surface_skin_temperature, sea_surface_subskin_temperature and sea_surface_foundation_temperature which can be used to describe data located at the specified surfaces. For observed data, depending on the period during which the observation was made, the measured in situ temperature was recorded against standard "scales". These historical scales include the International Practical Temperature Scale of 1948 (IPTS-48; 1948-1967), the International Practical Temperature Scale of 1968 (IPTS-68, Barber, 1969; 1968-1989) and the International Temperature Scale of 1990 (ITS-90, Saunders 1990; 1990 onwards). Conversion of data between these scales follows t68 = t48 - (4.4 x 10e-6) * t48(100 - t - 48); t90 = 0.99976 * t68. Observations made prior to 1948 (IPTS-48) have not been documented and therefore a conversion cannot be certain. Differences between t90 and t68 can be up to 0.01 at temperatures of 40 C and above; differences of 0.002-0.007 occur across the standard range of ocean temperatures (-10 - 30 C). The International Equation of State of Seawater 1980 (EOS-80, UNESCO, 1981) and the Practical Salinity Scale (PSS-78) were both based on IPTS-68, while the Thermodynamic Equation of Seawater 2010 (TEOS-10) is based on ITS-90. References: Barber, 1969, doi: 10.1088/0026-1394/5/2/001; UNESCO, 1981; Saunders, 1990, WOCE Newsletter, 10, September 1990. + The phrase "change_over_time_in_X" means change in a quantity X over a time-interval, which should be defined by the bounds of the time coordinate. Sea water temperature is the in situ temperature of the sea water. To specify the depth at which the temperature applies use a vertical coordinate variable or scalar coordinate variable. There are standard names for sea_surface_temperature, sea_surface_skin_temperature, sea_surface_subskin_temperature and sea_surface_foundation_temperature which can be used to describe data located at the specified surfaces. For observed data, depending on the period during which the observation was made, the measured in situ temperature was recorded against standard "scales". These historical scales include the International Practical Temperature Scale of 1948 (IPTS-48; 1948-1967), the International Practical Temperature Scale of 1968 (IPTS-68, Barber, 1969; 1968-1989) and the International Temperature Scale of 1990 (ITS-90, Saunders 1990; 1990 onwards). Conversion of data between these scales follows t68 = t48 - (4.4 x 10e-6) * t48(100 - t48); t90 = 0.99976 * t68. Observations made prior to 1948 (IPTS-48) have not been documented and therefore a conversion cannot be certain. Differences between t90 and t68 can be up to 0.01 at temperatures of 40 C and above; differences of 0.002-0.007 occur across the standard range of ocean temperatures (-10 - 30 C). The International Equation of State of Seawater 1980 (EOS-80, UNESCO, 1981) and the Practical Salinity Scale (PSS-78) were both based on IPTS-68, while the Thermodynamic Equation of Seawater 2010 (TEOS-10) is based on ITS-90. References: Barber, 1969, doi: 10.1088/0026-1394/5/2/001; UNESCO, 1981; Saunders, 1990, WOCE Newsletter, 10, September 1990.
It is strongly recommended that a variable with this standard name should have the attribute units_metadata="temperature: difference", meaning that it refers to temperature differences and implying that the origin of the temperature scale is irrelevant, because it is essential to know whether a temperature is on-scale or a difference in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). kg m-2 - The surface called "surface" means the lower boundary of the atmosphere. "change_over_time_in_X" means change in a quantity X over a time-interval, which should be defined by the bounds of the time coordinate. "Amount" means mass per unit area. Surface amount refers to the amount on the ground, excluding that on the plant or vegetation canopy. + The phrase "change_over_time_in_X" means change in a quantity X over a time-interval, which should be defined by the bounds of the time coordinate. "Amount" means mass per unit area. Surface snow amount refers to the amount on the solid ground or on surface ice cover, but excludes, for example, falling snowflakes and snow on plants. @@ -2753,11 +2933,25 @@ totals are summed to obtain the index. The phrase "change_over_time_in_X" means change in a quantity X over a time-interval, which should be defined by the bounds of the time coordinate. "Content" indicates a quantity per unit area. Thermal energy is the total vibrational energy, kinetic and potential, of all the molecules and atoms in a substance. "Vegetation" means any living plants e.g. trees, shrubs, grass. The term "plants" refers to the kingdom of plants in the modern classification which excludes fungi. Plants are autotrophs i.e. "producers" of biomass using carbon obtained from carbon dioxide. "Litter" is dead plant material in or above the soil. It is distinct from coarse wood debris. The precise distinction between "fine" and "coarse" is model dependent. The "soil content" of a quantity refers to the vertical integral from the surface down to the bottom of the soil model. For the content between specified levels in the soil, standard names including "content_of_soil_layer" are used. + + 1 + + + Coefficient value, based on the Charnock (1955) empirical expression for deriving the quantity with standard name surface_roughness_length_for_momentum_in_air over the ocean. Reference: AMS Glossary http://glossary.ametsoc.org/wiki/Charnock%27s_relation. The surface called "surface" means the lower boundary of the atmosphere. + + 1 - "X_area_fraction" means the fraction of horizontal area occupied by X. "X_area" means the horizontal area occupied by X within the grid cell. The clear_sky area fraction is for the whole atmosphere column, as seen from the surface or the top of the atmosphere. + "Area fraction" is the fraction of a grid cell's horizontal area that has some characteristic of interest. It is evaluated as the area of interest divided by the grid cell area. It may be expressed as a fraction, a percentage, or any other dimensionless representation of a fraction. The clear_sky area fraction is for the whole atmosphere column, as seen from the surface or the top of the atmosphere. "Clear sky" means in the absence of clouds. + + + + 1 + + + A quality flag that reports the result of the Climatology test, which checks that values are within reasonable range bounds for a given time and location. The linkage between the data variable and this variable is achieved using the ancillary_variables attribute. 
There are standard names for other specific quality tests which take the form of X_quality_flag. Quality information that does not match any of the specific quantities should be given the more general standard name of quality_flag. @@ -2771,14 +2965,14 @@ totals are summed to obtain the index. 1 71 E164 clt - "X_area_fraction" means the fraction of horizontal area occupied by X. "X_area" means the horizontal area occupied by X within the grid cell. Cloud area fraction is also called "cloud amount" and "cloud cover". The cloud area fraction is for the whole atmosphere column, as seen from the surface or the top of the atmosphere. The cloud area fraction in a layer of the atmosphere has the standard name cloud_area_fraction_in_atmosphere_layer. + "Area fraction" is the fraction of a grid cell's horizontal area that has some characteristic of interest. It is evaluated as the area of interest divided by the grid cell area. It may be expressed as a fraction, a percentage, or any other dimensionless representation of a fraction. The cloud area fraction is for the whole atmosphere column, as seen from the surface or the top of the atmosphere. For the cloud area fraction between specified levels in the atmosphere, standard names including "cloud_area_fraction_in_atmosphere_layer" are used. Standard names also exist for high, medium and low cloud types. Cloud area fraction is also called "cloud amount" and "cloud cover". 1 cl - "Layer" means any layer with upper and lower boundaries that have constant values in some vertical coordinate. There must be a vertical coordinate variable indicating the extent of the layer(s). If the layers are model layers, the vertical coordinate can be model_level_number, but it is recommended to specify a physical coordinate (in a scalar or auxiliary coordinate variable) as well. "X_area_fraction" means the fraction of horizontal area occupied by X. "X_area" means the horizontal area occupied by X within the grid cell. Cloud area fraction is also called "cloud amount" and "cloud cover". + "Area fraction" is the fraction of a grid cell's horizontal area that has some characteristic of interest. It is evaluated as the area of interest divided by the grid cell area. It may be expressed as a fraction, a percentage, or any other dimensionless representation of a fraction. "Layer" means any layer with upper and lower boundaries that have constant values in some vertical coordinate. There must be a vertical coordinate variable indicating the extent of the layer(s). If the layers are model layers, the vertical coordinate can be model_level_number, but it is recommended to specify a physical coordinate (in a scalar or auxiliary coordinate variable) as well. Standard names referring only to "cloud_area_fraction" should be used for quantities for the whole atmosphere column. Standard names also exist for high, medium and low cloud types. Cloud area fraction is also called "cloud amount" and "cloud cover". @@ -2806,7 +3000,7 @@ totals are summed to obtain the index. 1 - Cloud liquid water mixing ratio of a parcel of air is the ratio of the mass of liquid water to the mass of dry air. + Cloud liquid water mixing ratio of a parcel of air is the ratio of the mass of liquid water to the mass of dry air. "Cloud liquid water" refers to the liquid phase of cloud water. A diameter of 0.2 mm has been suggested as an upper limit to the size of drops that shall be regarded as cloud drops; larger drops fall rapidly enough so that only very strong updrafts can sustain them. 
Any such division is somewhat arbitrary, and active cumulus clouds sometimes contain cloud drops much larger than this. Reference: AMS Glossary http://glossary.ametsoc.org/wiki/Cloud_drop. @@ -2816,6 +3010,20 @@ totals are summed to obtain the index. cloud_top refers to the top of the highest cloud. Altitude is the (geometric) height above the geoid, which is the reference geopotential surface. The geoid is similar to mean sea level. + + 1 + + + A variable with the standard_name of cloud_type contains either strings which indicate the cloud type, or flags which can be translated to strings using flag_values and flag_meanings attributes. + + + + m-3 + + + "Colony forming unit" means an estimate of the viable bacterial or fungal numbers determined by counting colonies grown from a sample. "Number concentration" means the number of particles or other specified objects per unit volume. "Biological taxon" is a name or other label identifying an organism or a group of organisms as belonging to a unit of classification in a hierarchical taxonomy. There must be an auxiliary coordinate variable with standard name biological_taxon_name to identify the taxon in human readable format and optionally an auxiliary coordinate variable with standard name biological_taxon_lsid to provide a machine-readable identifier. See Section 6.1.2 of the CF convention (version 1.8 or later) for information about biological taxon auxiliary coordinate variables. + + Pa m @@ -2823,6 +3031,20 @@ totals are summed to obtain the index. "Compressive strength" is a measure of the capacity of a material to withstand compressive forces. If compressive forces are exerted on a material in excess of its compressive strength, fracturing will occur. "Sea ice" means all ice floating in the sea which has formed from freezing sea water, rather than by other processes such as calving of land ice to form icebergs. + + Pa + + + The maximum force applied as axial strain to an unconfined frozen soil sample before failure. + + + + Pa + + + The maximum force applied as axial strain to an unconfined soil sample before failure. + + 1 @@ -2841,14 +3063,14 @@ totals are summed to obtain the index. 1 72 E185 - "X_area_fraction" means the fraction of horizontal area occupied by X. "X_area" means the horizontal area occupied by X within the grid cell. Cloud area fraction is also called "cloud amount" and "cloud cover". The cloud area fraction is for the whole atmosphere column, as seen from the surface or the top of the atmosphere. The cloud area fraction in a layer of the atmosphere has the standard name cloud_area_fraction_in_atmosphere_layer. Convective cloud is that produced by the convection schemes in an atmosphere model. + "Area fraction" is the fraction of a grid cell's horizontal area that has some characteristic of interest. It is evaluated as the area of interest divided by the grid cell area. It may be expressed as a fraction, a percentage, or any other dimensionless representation of a fraction. The cloud area fraction is for the whole atmosphere column, as seen from the surface or the top of the atmosphere. For the cloud area fraction between specified levels in the atmosphere, standard names including "cloud_area_fraction_in_atmosphere_layer" are used. Standard names also exist for high, medium and low cloud types. Convective cloud is that produced by the convection schemes in an atmosphere model. Cloud area fraction is also called "cloud amount" and "cloud cover". 
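The cloud_type entry above permits either strings or numeric flags translated through flag_values and flag_meanings attributes. Below is a minimal sketch of the flag encoding, assuming the netCDF4-python package; the numeric codes and the category names are invented for illustration and do not come from any controlled vocabulary.

import numpy as np
import netCDF4

ds = netCDF4.Dataset("cloud_type_example.nc", "w", format="NETCDF4")
ds.createDimension("time", 3)

ctype = ds.createVariable("cloud_type", "i1", ("time",))
ctype.standard_name = "cloud_type"
# Each stored integer maps to the meaning at the same position.
ctype.flag_values = np.array([0, 1, 2, 3], dtype="i1")
ctype.flag_meanings = "clear cumulus stratus cirrus"
ctype[:] = np.array([0, 2, 3], dtype="i1")
ds.close()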
1 - "Layer" means any layer with upper and lower boundaries that have constant values in some vertical coordinate. There must be a vertical coordinate variable indicating the extent of the layer(s). If the layers are model layers, the vertical coordinate can be model_level_number, but it is recommended to specify a physical coordinate (in a scalar or auxiliary coordinate variable) as well. "X_area_fraction" means the fraction of horizontal area occupied by X. "X_area" means the horizontal area occupied by X within the grid cell. Cloud area fraction is also called "cloud amount" and "cloud cover". Convective cloud is that produced by the convection schemes in an atmosphere model. + "Area fraction" is the fraction of a grid cell's horizontal area that has some characteristic of interest. It is evaluated as the area of interest divided by the grid cell area. It may be expressed as a fraction, a percentage, or any other dimensionless representation of a fraction. "Layer" means any layer with upper and lower boundaries that have constant values in some vertical coordinate. There must be a vertical coordinate variable indicating the extent of the layer(s). If the layers are model layers, the vertical coordinate can be model_level_number, but it is recommended to specify a physical coordinate (in a scalar or auxiliary coordinate variable) as well. Standard names referring only to "cloud_area_fraction" should be used for quantities for the whole atmosphere column. Standard names also exist for high, medium and low cloud types. Convective cloud is that produced by the convection schemes in an atmosphere model. Cloud area fraction is also called "cloud amount" and "cloud cover". @@ -2956,6 +3178,34 @@ totals are summed to obtain the index. A numerical correction which is added to modelled negative specific humidities in order to obtain a value of zero. + + K m s-1 + + + Covariance refers to the sample covariance rather than the population covariance. The quantity with standard name covariance_over_longitude_of_northward_wind_and_air_temperature is the covariance of the deviations of meridional air velocity and air temperature about their respective zonal mean values. The data variable must be accompanied by a vertical coordinate variable or scalar coordinate variable and is calculated on an isosurface of that vertical coordinate. "Northward" indicates a vector component which is positive when directed northward (negative southward). Wind is defined as a two-dimensional (horizontal) air velocity vector, with no vertical component. (Vertical motion in the atmosphere has the standard name "upward_air_velocity"). Air temperature is the bulk temperature of the air, not the surface (skin) temperature. It is strongly recommended that a variable with this standard name should have the attribute units_metadata="temperature: difference", meaning that it refers to temperature differences and implying that the origin of the temperature scale is irrelevant, because it is essential to know whether a temperature is on-scale or a difference in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). + + + + 1 + + + The phrase "ratio_of_X_to_Y" means X/Y. It may be expressed as a fraction, a percentage, or any other dimensionless representation of a fraction. Also known as specific gravity, where soil represents a dry soil sample. The density of a substance is its mass per unit volume. 
+ + + + degree_north + + + The latitude of deployment of a station or instrument. The term can be used whenever the deployment position of a station or instrument needs to be supplied along with other types of positions. If a data variable has only one latitude coordinate variable, the standard name of latitude should generally be preferred to deployment_latitude, because latitude is recognised by generic software. If the deployment latitude is also the nominal latitude for a discrete geometry (as in Section 9.5 of the CF convention), the deployment latitude should also, or instead, be recorded in a coordinate variable with the standard name of latitude and axis="Y". Latitude is positive northward; its units of "degree_north" (or equivalent) indicate this explicitly. + + + + degree_east + + + The longitude of deployment of a station or instrument. The term can be used whenever the deployment position of a station or instrument needs to be supplied along with other types of positions. If a data variable has only one longitude coordinate variable, the standard name of longitude should generally be preferred to deployment_longitude, because longitude is recognised by generic software. If the deployment longitude is also the nominal longitude for a discrete geometry (as in Section 9.5 of the CF convention), the deployment longitude should also, or instead, be recorded in a coordinate variable with the standard name of longitude and axis="X". Longitude is positive eastward; its units of "degree_east" (or equivalent) indicate this explicitly. + + m @@ -2963,6 +3213,13 @@ totals are summed to obtain the index. Depth is the vertical distance below the surface. + + m + + + The quantity with standard name depth_at_base_of_unfrozen_ground is the instantaneous depth of the downward penetration of thaw from the ground surface at a given time. Permafrost is soil or rock that has remained at a temperature at or below zero degrees Celsius throughout the seasonal cycle for two or more consecutive years. The maximum measurable depth_at_base_of_unfrozen_ground value as recorded at the end of a thawing season corresponds to the permafrost_active_layer_thickness. + + m @@ -3009,14 +3266,49 @@ totals are summed to obtain the index. K 18 - Dew point depression is also called dew point deficit. It is the amount by which the air temperature exceeds its dew point temperature. Dew point temperature is the temperature at which a parcel of air reaches saturation upon being cooled at constant pressure and specific humidity. + Dew point depression is also called dew point deficit. It is the amount by which the air temperature exceeds its dew point temperature. Dew point temperature is the temperature at which a parcel of air reaches saturation upon being cooled at constant pressure and specific humidity. It is strongly recommended that a variable with this standard name should have the attribute units_metadata="temperature: difference", meaning that it refers to temperature differences and implying that the origin of the temperature scale is irrelevant, because it is essential to know whether a temperature is on-scale or a difference in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). K 17 - Dew point temperature is the temperature at which a parcel of air reaches saturation upon being cooled at constant pressure and specific humidity.
+ Dew point temperature is the temperature at which a parcel of air reaches saturation upon being cooled at constant pressure and specific humidity. It is strongly recommended that a variable with this standard name should have a units_metadata attribute, with one of the values "on-scale" or "difference", whichever is appropriate for the data, because it is essential to know whether the temperature is on-scale (meaning relative to the origin of the scale indicated by the units) or refers to temperature differences (implying that the origin of the temperature scale is irrelevant), in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). + + + + m + + + "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. "Ambient_aerosol" means that the aerosol is measured or modelled at the ambient state of pressure, temperature and relative humidity that exists in its immediate environment. "Ambient aerosol particles" are aerosol particles that have taken up ambient water through hygroscopic growth. The extent of hygroscopic growth depends on the relative humidity and the composition of the particles. To specify the relative humidity and temperature at which the quantity described by the standard name applies, provide scalar coordinate variables with standard names of "relative_humidity" and "air_temperature". + + + + degree_C + + + This variable quantifies the temperature difference between the skin temperature (sea_surface_skin_temperature) and the subskin temperature (sea_surface_subskin_temperature) due to the turbulent and radiative heat fluxes at the air-sea interface. This difference is commonly referred to as the “cool skin effect” as the solar radiation absorbed within the very thin thermal subskin layer is typically negligible compared to ocean surface heat loss from the combined sensible, latent, and net longwave radiation heat fluxes. + + + + degree_C + + + This variable quantifies the temperature difference between the top (sea_surface_subskin_temperature) and bottom (sea_surface_foundation_temperature) of the diurnal warm layer. This diurnal warm layer, caused by absorption of solar radiation in the absence of strong mixing, together with a cool skin effect, accounts for the total temperature difference between the sea_surface_skin_temperature and the sea_surface_foundation_temperature. The cool skin effect is associated with the turbulent and infrared radiative heat loss at the air-sea interface. Freshwater fluxes may also affect this variable (sea_surface_subskin_temperature_minus_sea_surface_foundation_temperature). + + + + degree_C + + + This variable quantifies the temperature difference between the top of the diurnal warm layer (sea_surface_subskin_temperature) and the in-situ measured sea surface temperature at depth (sea_surface_temperature). A diurnal warm layer can develop in the top few meters of the ocean through the absorption of solar radiation, if surface mixing is sufficiently weak. + + + + K + + + Sea surface temperature is usually abbreviated as "SST". It is the temperature of sea water near the surface (including the part under sea-ice, if any), not the skin or interface temperature, whose standard names are sea_surface_skin_temperature and surface_temperature, respectively.
For the temperature of sea water at a particular depth or layer, a data variable of "sea_water_temperature" with a vertical coordinate axis should be used. Air temperature is the bulk temperature of the air, not the surface (skin) temperature. It is strongly recommended that a variable with this standard name should have the attribute units_metadata="temperature: difference", meaning that it refers to temperature differences and implying that the origin of the temperature scale is irrelevant, because it is essential to know whether a temperature is on-scale or a difference in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). @@ -3044,7 +3336,7 @@ totals are summed to obtain the index. 1 - The term "Exner function" is applied to various quantities in the literature. "Dimensionless Exner function" is the standard name of (p/p0)^(R/Cp), where p is pressure, p0 a reference pressure, R the gas constant and Cp the specific heat at constant pressure. This quantity is also the ratio of in-situ to potential temperature. Standard names for other variants can be defined on request. + The term "Exner function" is applied to various quantities in the literature. "Dimensionless Exner function" is the standard name of (p/p0)^(R/Cp), where p is pressure, p0 a reference pressure, R the gas constant and Cp the specific heat at constant pressure. This quantity is also the ratio of in-situ to potential temperature. Standard names for other variants can be defined on request. To specify the reference pressure to which the quantity applies, provide a scalar coordinate variable with standard name reference_pressure. @@ -3058,7 +3350,14 @@ totals are summed to obtain the index. degree - The direction_of_radial_vector_away_from_instrument is the direction in which the instrument itself is pointing. The direction is measured positive clockwise from due north. The "instrument" (examples are radar and lidar) is the device used to make an observation. "direction_of_X" means direction of a vector, a bearing. + The phrase "direction_of_X" means direction of a vector, a bearing. The direction is measured positive clockwise from due north. The direction_of_radial_vector_away_from_instrument is the direction in which the instrument itself is pointing. The "instrument" (examples are radar and lidar) is the device used to make an observation. The standard name direction_of_radial_vector_toward_instrument should be used for a data variable having the opposite sign convention. + + + + degree + + + The phrase "direction_of_X" means direction of a vector, a bearing. The direction is measured positive clockwise from due north. The direction_of_radial_vector_toward_instrument is the direction opposite to that in which the instrument itself is pointing. The "instrument" (examples are radar and lidar) is the device used to make an observation. The standard name direction_of_radial_vector_away_from_instrument should be used for a data variable having the opposite sign convention. @@ -3135,7 +3434,7 @@ totals are summed to obtain the index. Pa - "Eastward" indicates a vector component which is positive when directed eastward (negative westward). "Downward" indicates a vector component which is positive when directed downward (negative upward). "Downward eastward" indicates the ZX component of a tensor. Momentum flux is dimensionally equivalent to stress and pressure. It is a tensor quantity. 
In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. The specification of a physical process by the phrase due_to_process means thatthe quantity named is a single term in a sum of terms which together compose the generalquantity named by omitting the phrase. + "Eastward" indicates a vector component which is positive when directed eastward (negative westward). "Downward" indicates a vector component which is positive when directed downward (negative upward). "Downward eastward" indicates the ZX component of a tensor. Momentum flux is dimensionally equivalent to stress and pressure. It is a tensor quantity. In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. @@ -3236,6 +3535,20 @@ totals are summed to obtain the index. "x" indicates a vector component along the grid x-axis, positive with increasing x. "Downward" indicates a vector component which is positive when directed downward (negative upward). "Sea ice" means all ice floating in the sea which has formed from freezing sea water, rather than by other processes such as calving of land ice to form icebergs. + + Pa + + + "Downward" indicates a vector component which is positive when directed downward (negative upward). "x" indicates a vector component along the grid x-axis, positive with increasing x. A downward x stress is a downward flux of momentum towards the positive direction of the model's x-axis. The phrase "sea water surface" means the upper boundary of the liquid portion of an ocean or sea, including the boundary to floating ice if present. + + + + Pa + + + "Downward" indicates a vector component which is positive when directed downward (negative upward). "x" indicates a vector component along the grid x-axis, positive with increasing x. A downward x stress is a downward flux of momentum towards the positive direction of the model's x-axis. A positive correction is downward i.e. added to the ocean. The phrase "sea water surface" means the upper boundary of the liquid portion of an ocean or sea, including the boundary to floating ice if present. + + Pa @@ -3243,6 +3556,20 @@ totals are summed to obtain the index. "y" indicates a vector component along the grid y-axis, positive with increasing y. "Downward" indicates a vector component which is positive when directed downward (negative upward). "Sea ice" means all ice floating in the sea which has formed from freezing sea water, rather than by other processes such as calving of land ice to form icebergs. + + Pa + + + "Downward" indicates a vector component which is positive when directed downward (negative upward). "y" indicates a vector component along the grid y-axis, positive with increasing y. A downward y stress is a downward flux of momentum towards the positive direction of the model's y-axis. The phrase "sea water surface" means the upper boundary of the liquid portion of an ocean or sea, including the boundary to floating ice if present. + + + + Pa + + + "Downward" indicates a vector component which is positive when directed downward (negative upward). "y" indicates a vector component along the grid y-axis, positive with increasing y. 
A downward y stress is a downward flux of momentum towards the positive direction of the model's y-axis. A positive correction is downward i.e. added to the ocean. The phrase "sea water surface" means the upper boundary of the liquid portion of an ocean or sea, including the boundary to floating ice if present. + + W m-2 @@ -3257,6 +3584,20 @@ totals are summed to obtain the index. Downwelling radiation is radiation from above. It does not mean "net downward". The sign convention is that "upwelling" is positive upwards and "downwelling" is positive downwards. The term "longwave" means longwave radiation. When thought of as being incident on a surface, a radiative flux is sometimes called "irradiance". In addition, it is identical with the quantity measured by a cosine-collector light-meter and sometimes called "vector irradiance". In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. A phrase assuming_condition indicates that the named quantity is the value which would obtain if all aspects of the system were unaltered except for the assumption of the circumstances specified by the condition. "Clear sky" means in the absence of clouds. + + W/m2 + + + Downwelling radiation is radiation from above. It does not mean "net downward". The sign convention is that "upwelling" is positive upwards and "downwelling" is positive downwards. The term "longwave" means longwave radiation. When thought of as being incident on a surface, a radiative flux is sometimes called "irradiance". In addition, it is identical with the quantity measured by a cosine-collector light-meter and sometimes called "vector irradiance". In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. A phrase assuming_condition indicates that the named quantity is the value which would obtain if all aspects of the system were unaltered except for the assumption of the circumstances specified by the condition. "Clear sky" means in the absence of clouds. This 3D ozone field acts as a reference ozone field in a diagnostic call to the model's radiation scheme. It is expressed in terms of mole fraction of ozone in air. It may be observation-based or model-derived. It may be from any time period. By using the same ozone reference in the diagnostic radiation call in two model simulations and calculating differences between the radiative flux diagnostics from the prognostic call to the radiation scheme and the diagnostic call to the radiation scheme with the ozone reference, an instantaneous radiative forcing for ozone can be calculated. + + + + W/m2 + + + Downwelling radiation is radiation from above. It does not mean "net downward". The sign convention is that "upwelling" is positive upwards and "downwelling" is positive downwards. The term "longwave" means longwave radiation. When thought of as being incident on a surface, a radiative flux is sometimes called "irradiance". In addition, it is identical with the quantity measured by a cosine-collector light-meter and sometimes called "vector irradiance". In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. A phrase assuming_condition indicates that the named quantity is the value which would obtain if all aspects of the system were unaltered except for the assumption of the circumstances specified by the condition. 
This 3D ozone field acts as a reference ozone field in a diagnostic call to the model's radiation scheme. It is expressed in terms of mole fraction of ozone in air. It may be observation-based or model-derived. It may be from any time period. By using the same ozone reference in the diagnostic radiation call in two model simulations and calculating differences between the radiative flux diagnostics from the prognostic call to the radiation scheme and the diagnostic call to the radiation scheme with the ozone reference, an instantaneous radiative forcing for ozone can be calculated. + + W m-2 sr-1 @@ -3411,6 +3752,20 @@ totals are summed to obtain the index. Downwelling radiation is radiation from above. It does not mean "net downward". The sign convention is that "upwelling" is positive upwards and "downwelling" is positive downwards. The term "shortwave" means shortwave radiation. When thought of as being incident on a surface, a radiative flux is sometimes called "irradiance". In addition, it is identical with the quantity measured by a cosine-collector light-meter and sometimes called "vector irradiance". In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. A phrase "assuming_condition" indicates that the named quantity is the value which would obtain if all aspects of the system were unaltered except for the assumption of the circumstances specified by the condition. "Clear sky" means in the absence of clouds. + + W/m2 + + + Downwelling radiation is radiation from above. It does not mean "net downward". The sign convention is that "upwelling" is positive upwards and "downwelling" is positive downwards. The term "shortwave" means shortwave radiation. When thought of as being incident on a surface, a radiative flux is sometimes called "irradiance". In addition, it is identical with the quantity measured by a cosine-collector light-meter and sometimes called "vector irradiance". In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. A phrase "assuming_condition" indicates that the named quantity is the value which would obtain if all aspects of the system were unaltered except for the assumption of the circumstances specified by the condition. "Clear sky" means in the absence of clouds. This 3D ozone field acts as a reference ozone field in a diagnostic call to the model's radiation scheme. It is expressed in terms of mole fraction of ozone in air. It may be observation-based or model-derived. It may be from any time period. By using the same ozone reference in the diagnostic radiation call in two model simulations and calculating differences between the radiative flux diagnostics from the prognostic call to the radiation scheme and the diagnostic call to the radiation scheme with the ozone reference, an instantaneous radiative forcing for ozone can be calculated. + + + + W/m2 + + + Downwelling radiation is radiation from above. It does not mean "net downward". The sign convention is that "upwelling" is positive upwards and "downwelling" is positive downwards. The term "shortwave" means shortwave radiation. When thought of as being incident on a surface, a radiative flux is sometimes called "irradiance". In addition, it is identical with the quantity measured by a cosine-collector light-meter and sometimes called "vector irradiance". 
In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. A phrase "assuming_condition" indicates that the named quantity is the value which would obtain if all aspects of the system were unaltered except for the assumption of the circumstances specified by the condition. This 3D ozone field acts as a reference ozone field in a diagnostic call to the model's radiation scheme. It is expressed in terms of mole fraction of ozone in air. It may be observation-based or model-derived. It may be from any time period. By using the same ozone reference in the diagnostic radiation call in two model simulations and calculating differences between the radiative flux diagnostics from the prognostic call to the radiation scheme and the diagnostic call to the radiation scheme with the ozone reference, an instantaneous radiative forcing for ozone can be calculated. + + W m-2 @@ -3446,6 +3801,13 @@ totals are summed to obtain the index. Downwelling radiation is radiation from above. It does not mean "net downward". The sign convention is that "upwelling" is positive upwards and "downwelling" is positive downwards. Spherical irradiance is the radiation incident on unit area of a hemispherical (or "2-pi") collector. It is sometimes called "scalar irradiance". The direction (up/downwelling) is specified. Radiation incident on a 4-pi collector has standard names of "omnidirectional spherical irradiance". A coordinate variable for radiation wavelength should be given the standard name radiation_wavelength. + + kg m-2 + + + The quantity with standard name drainage_amount_through_base_of_soil_model is the amount of water that drains through the bottom of a soil column extending from the surface to a specified depth. "Drainage" is the process of removal of excess water from soil by gravitational flow. "Amount" means mass per unit area. A vertical coordinate variable or scalar coordinate with standard name "depth" should be used to specify the depth to which the soil column extends. + + 1 @@ -3467,6 +3829,13 @@ totals are summed to obtain the index. "Content" indicates a quantity per unit area. "Layer" means any layer with upper and lower boundaries that have constant values in some vertical coordinate. There must be a vertical coordinate variable indicating the extent of the layer(s). If the layers are model layers, the vertical coordinate can be model_level_number, but it is recommended to specify a physical coordinate (in a scalar or auxiliary coordinate variable) as well. Dry energy is the sum of dry static energy and kinetic energy. Dry static energy is the sum of enthalpy and potential energy (itself the sum of gravitational and centripetal potential energy). Enthalpy can be written either as (1) CpT, where Cp is heat capacity at constant pressure, T is absolute temperature, or (2) U+pV, where U is internal energy, p is pressure and V is volume. + + kg m-3 + + + The density of the soil after oven drying until constant mass is reached. Volume is determined from the field sample volume. The density of a substance is its mass per unit volume. + + J m-2 @@ -3492,14 +3861,21 @@ totals are summed to obtain the index. 
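Several entries above (for example the content_of_atmosphere_layer and cloud_area_fraction_in_atmosphere_layer families) require a vertical coordinate with bounds to define the layer, recommending a physical coordinate alongside model_level_number. Below is a minimal sketch of that pattern, assuming the netCDF4-python package; the pressure values and names of the netCDF objects are illustrative.

import netCDF4

ds = netCDF4.Dataset("layer_example.nc", "w", format="NETCDF4")
ds.createDimension("layer", 2)
ds.createDimension("bnds", 2)

# Model levels identify the layers...
lev = ds.createVariable("model_level_number", "i4", ("layer",))
lev.standard_name = "model_level_number"
lev[:] = [1, 2]

# ...and a physical pressure coordinate with bounds gives their extent.
p = ds.createVariable("air_pressure", "f4", ("layer",))
p.standard_name = "air_pressure"
p.units = "Pa"
p.bounds = "air_pressure_bnds"
p[:] = [92500.0, 77500.0]

pb = ds.createVariable("air_pressure_bnds", "f4", ("layer", "bnds"))
pb[:] = [[100000.0, 85000.0], [85000.0, 70000.0]]
ds.close()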
1 - The Advanced Dvorak Technique (ADT) is used to derive a set of Dvorak Tropical numbers using an objective pattern recognition algorithm to determine the intensity of a tropical cyclone by matching observed brightness temperature patterns, maximum sustained winds and minimum sea level pressure to a set of pre-defined tropical cyclone structures. Dvorak Tropical numbers range from 1.0 to 8.0, increasing with storm intensity. Reference: Olander, T. L., & Velden, C. S., The Advanced Dvorak Technique: Continued Development of an Objective Scheme to Estimate Tropical Cyclone Intensity Using Geostationary Infrared Satellite Imagery (2007). American Meterorological Society Weather and Forecasting, 22, 287-298. + The Advanced Dvorak Technique (ADT) is used to derive a set of Dvorak Tropical numbers using an objective pattern recognition algorithm to determine the intensity of a tropical cyclone by matching observed brightness temperature patterns, maximum sustained winds and minimum sea level pressure to a set of pre-defined tropical cyclone structures. Dvorak Tropical numbers range from 1.0 to 8.0, increasing with storm intensity. Reference: Olander, T. L., & Velden, C. S., The Advanced Dvorak Technique: Continued Development of an Objective Scheme to Estimate Tropical Cyclone Intensity Using Geostationary Infrared Satellite Imagery (2007). American Meteorological Society Weather and Forecasting, 22, 287-298. K - The dynamical tropopause used in interpreting the dynamics of the upper troposphere and lower stratosphere. There are various definitions of dynamical tropopause in the scientific literature. + The dynamical tropopause used in interpreting the dynamics of the upper troposphere and lower stratosphere. There are various definitions of dynamical tropopause in the scientific literature. It is strongly recommended that a variable with this standard name should have a units_metadata attribute, with one of the values "on-scale" or "difference", whichever is appropriate for the data, because it is essential to know whether the temperature is on-scale (meaning relative to the origin of the scale indicated by the units) or refers to temperature differences (implying that the origin of the temperature scale is irrelevant), in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). + + + + m s-1 + + + The eastward motion of air, relative to near-surface eastward current; calculated as eastward_wind minus eastward_sea_water_velocity. A vertical coordinate variable or scalar coordinate with standard name "depth" should be used to indicate the depth of sea water velocity used in the calculation. Similarly, a vertical coordinate variable or scalar coordinate with standard name "height" should be used to indicate the height of the wind component. A velocity is a vector quantity. "Eastward" indicates a vector component which is positive when directed eastward (negative westward). @@ -3523,6 +3899,13 @@ totals are summed to obtain the index. "Eastward" indicates a vector component which is positive when directed eastward (negative westward). Transport across_unit_distance means expressed per unit distance normal to the direction of transport. + + s-1 + + + The quantity with standard name eastward_derivative_of_eastward_wind is the derivative of the eastward component of wind with respect to distance in the eastward direction for a given atmospheric level. 
The phrase "component_derivative_of_X" means derivative of X with respect to distance in the component direction, which may be "northward", "southward", "eastward", "westward", "upward", "downward", "x" or "y". The last two indicate derivatives along the axes of the grid, in the case where they are not true longitude and latitude. A positive value indicates that X is increasing with distance along the positive direction of the axis. Wind is defined as a two-dimensional (horizontal) air velocity vector, with no vertical component. (Vertical motion in the atmosphere has the standard name "upward_air_velocity"). + + s-1 @@ -3530,6 +3913,20 @@ totals are summed to obtain the index. A velocity is a vector quantity. "Eastward" indicates a vector component which is positive when directed eastward (negative westward). "Northward" indicates a vector component which is positive when directed northward (negative southward). Sea ice velocity is defined as a two-dimensional vector, with no vertical component. "component_derivative_of_X" means derivative of X with respect to distance in the component direction, which may be northward, southward, eastward, westward, x or y. The last two indicate derivatives along the axes of the grid, in the case where they are not true longitude and latitude. The named quantity is a component of the strain rate tensor for sea ice. "Sea ice" means all ice floating in the sea which has formed from freezing sea water, rather than by other processes such as calving of land ice to form icebergs. + + s-1 + + + The quantity with standard name eastward_derivative_of_northward_wind is the derivative of the northward component of wind with respect to distance in the eastward direction for a given atmospheric level. The phrase "component_derivative_of_X" means derivative of X with respect to distance in the component direction, which may be "northward", "southward", "eastward", "westward", "upward", "downward", "x" or "y". The last two indicate derivatives along the axes of the grid, in the case where they are not true longitude and latitude. A positive value indicates that X is increasing with distance along the positive direction of the axis. Wind is defined as a two-dimensional (horizontal) air velocity vector, with no vertical component. (Vertical motion in the atmosphere has the standard name "upward_air_velocity"). + + + + degree m-1 + + + The quantity with standard name eastward_derivative_of_wind_from_direction is the derivative of wind from_direction with respect to the change in eastward lateral position for a given atmospheric level. The phrase "component_derivative_of_X" means derivative of X with respect to distance in the component direction, which may be "northward", "southward", "eastward", "westward", "upward", "downward", "x" or "y". The last two indicate derivatives along the axes of the grid, in the case where they are not true longitude and latitude. A positive value indicates that X is increasing with distance along the positive direction of the axis. The phrase "from_direction" is used in the construction X_from_direction and indicates the direction from which the velocity vector of X is coming. The direction is a bearing in the usual geographical sense, measured positive clockwise from due north. In meteorological reports, the direction of the wind vector is usually (but not always) given as the direction from which it is blowing ("wind_from_direction") (westerly, northerly, etc.). 
In other contexts, such as atmospheric modelling, it is often natural to give the direction in the usual manner of vectors as the heading or the direction to which it is blowing ("wind_to_direction") (eastward, southward, etc.). Wind is defined as a two-dimensional (horizontal) air velocity vector, with no vertical component. (Vertical motion in the atmosphere has the standard name "upward_air_velocity"). + + m s-1 @@ -3537,6 +3934,13 @@ totals are summed to obtain the index. A velocity is a vector quantity. "Eastward" indicates a vector component which is positive when directed eastward (negative westward). Flood water is water that covers land which is normally not covered by water. + + m s-1 + + + A velocity is a vector quantity. "Eastward" indicates a vector component which is positive when directed eastward (negative westward). Friction velocity is a reference wind velocity derived from the relationship between air density and downward stress and is usually applied at a level close to the surface where stress is assumed to be independent of height and approximately proportional to the square of mean velocity. + + m s-1 @@ -3586,6 +3990,20 @@ totals are summed to obtain the index. A velocity is a vector quantity. "Eastward" indicates a vector component which is positive when directed eastward (negative westward). A phrase assuming_condition indicates that the named quantity is the value which would obtain if all aspects of the system were unaltered except for the assumption of the circumstances specified by the condition. + + m s-1 + + + A velocity is a vector quantity. "Eastward" indicates a vector component which is positive when directed eastward (negative westward). The velocity at the sea floor is that adjacent to the ocean bottom, which would be the deepest grid cell in an ocean model and within the benthic boundary layer for measurements. + + + + m s-1 + + + A velocity is a vector quantity. "Eastward" indicates a vector component which is positive when directed eastward (negative westward). The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. + + m s-1 @@ -3593,6 +4011,13 @@ totals are summed to obtain the index. "Eastward" indicates a vector component which is positive when directed eastward (negative westward). The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. Parameterized mesoscale eddies occur on a spatial scale of many tens of kilometres and an evolutionary time of weeks. Reference: James C. McWilliams 2016, Submesoscale currents in the ocean, Proceedings of the Royal Society A: Mathematical, Physical and Engineering Sciences, volume 472, issue 2189. DOI: 10.1098/rspa.2016.0117. Parameterized mesoscale eddies are represented in ocean models using schemes such as the Gent-McWilliams scheme. + + m s-1 + + + A velocity is a vector quantity. "Eastward" indicates a vector component which is positive when directed eastward (negative westward). The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Due to tides" means due to all astronomical gravity changes which manifest as tides. No distinction is made between different tidal components. 
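The friction velocity entry above rests on the standard relation between surface stress and air density, u* = sqrt(|tau| / rho). A minimal sketch of that relation follows; the function name and sample values are illustrative assumptions, not part of the table:

```python
# Sketch: friction velocity u* = sqrt(|tau| / rho), the conventional
# stress/density relation described in the entry above.
import math

def friction_velocity(tau, rho_air):
    """u* in m s-1, from surface stress tau (Pa) and air density (kg m-3)."""
    return math.sqrt(abs(tau) / rho_air)

print(friction_velocity(tau=0.15, rho_air=1.225))  # ~0.35 m s-1, hypothetical
```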
+ + m s-1 @@ -3621,13 +4046,6 @@ totals are summed to obtain the index. "Eastward" indicates a vector component which is positive when directed eastward (negative westward). Wind is defined as a two-dimensional (horizontal) air velocity vector, with no vertical component. (Vertical motion in the atmosphere has the standard name upward_air_velocity.) - - s-1 - 45 - - "Eastward" indicates a vector component which is positive when directed eastward (negative westward). Wind is defined as a two-dimensional (horizontal) air velocity vector, with no vertical component. (Vertical motion in the atmosphere has the standard name upward_air_velocity.) Wind shear is the derivative of wind with respect to height. - - m @@ -3635,102 +4053,123 @@ totals are summed to obtain the index. The effective radius of a size distribution of particles, such as aerosols, cloud droplets or ice crystals, is the area weighted mean radius of particle size. It is calculated as the ratio of the third to the second moment of the particle size distribution. "cloud_top" refers to the top of the highest cloud. "condensed_water" means liquid and ice. - + m - The effective radius of a size distribution of particles, such as aerosols, cloud droplets or ice crystals, is the area weighted mean radius of particle size. It is calculated as the ratio of the third to the second moment of the particle size distribution. + The effective radius of a size distribution of particles, such as aerosols, cloud droplets or ice crystals, is the area weighted mean radius of particle size. It is calculated as the ratio of the third to the second moment of the particle size distribution. "Cloud liquid water" refers to the liquid phase of cloud water. A diameter of 0.2 mm has been suggested as an upper limit to the size of drops that shall be regarded as cloud drops; larger drops fall rapidly enough so that only very strong updrafts can sustain them. Any such division is somewhat arbitrary, and active cumulus clouds sometimes contain cloud drops much larger than this. Reference: AMS Glossary http://glossary.ametsoc.org/wiki/Cloud_drop. - + m - The effective radius of a size distribution of particles, such as aerosols, cloud droplets or ice crystals, is the area weighted mean radius of particle size. It is calculated as the ratio of the third to the second moment of the particle size distribution. cloud_top refers to the top of the highest cloud. + The effective radius of a size distribution of particles, such as aerosols, cloud droplets or ice crystals, is the area weighted mean radius of particle size. It is calculated as the ratio of the third to the second moment of the particle size distribution. "Cloud liquid water" refers to the liquid phase of cloud water. A diameter of 0.2 mm has been suggested as an upper limit to the size of drops that shall be regarded as cloud drops; larger drops fall rapidly enough so that only very strong updrafts can sustain them. Any such division is somewhat arbitrary, and active cumulus clouds sometimes contain cloud drops much larger than this. Reference: AMS Glossary http://glossary.ametsoc.org/wiki/Cloud_drop. The phrase "cloud_top" refers to the top of the highest cloud. - + m The effective radius of a size distribution of particles, such as aerosols, cloud droplets or ice crystals, is the area weighted mean radius of particle size. 
Convective cloud is that produced by the convection schemes in an atmosphere model. - + m - The effective radius of a size distribution of particles, such as aerosols, cloud droplets or ice crystals, is the area weighted mean radius of particle size. It is calculated as the ratio of the third to the second moment of the particle size distribution. Convective cloud is that produced by the convection schemes in an atmosphere model. + The effective radius of a size distribution of particles, such as aerosols, cloud droplets or ice crystals, is the area weighted mean radius of particle size. It is calculated as the ratio of the third to the second moment of the particle size distribution. Convective cloud is that produced by the convection schemes in an atmosphere model. "Cloud liquid water" refers to the liquid phase of cloud water. A diameter of 0.2 mm has been suggested as an upper limit to the size of drops that shall be regarded as cloud drops; larger drops fall rapidly enough so that only very strong updrafts can sustain them. Any such division is somewhat arbitrary, and active cumulus clouds sometimes contain cloud drops much larger than this. Reference: AMS Glossary http://glossary.ametsoc.org/wiki/Cloud_drop. - + m - The effective radius of a size distribution of particles, such as aerosols, cloud droplets or ice crystals, is the area weighted mean radius of particle size. It is calculated as the ratio of the third to the second moment of the particle size distribution. The phrase "convective_liquid_water_cloud_top" refers to the top of the highest convective liquid water cloud. Convective cloud is that produced by the convection schemes in an atmosphere model. + The effective radius of a size distribution of particles, such as aerosols, cloud droplets or ice crystals, is the area weighted mean radius of particle size. It is calculated as the ratio of the third to the second moment of the particle size distribution. The phrase "convective_liquid_water_cloud_top" refers to the top of the highest convective liquid water cloud. Convective cloud is that produced by the convection schemes in an atmosphere model. "Cloud liquid water" refers to the liquid phase of cloud water. A diameter of 0.2 mm has been suggested as an upper limit to the size of drops that shall be regarded as cloud drops; larger drops fall rapidly enough so that only very strong updrafts can sustain them. Any such division is somewhat arbitrary, and active cumulus clouds sometimes contain cloud drops much larger than this. Reference: AMS Glossary http://glossary.ametsoc.org/wiki/Cloud_drop. - + m The effective radius of a size distribution of particles, such as aerosols, cloud droplets or ice crystals, is the area weighted mean radius of particle size. It is calculated as the ratio of the third to the second moment of the particle size distribution. Convective cloud is that produced by the convection schemes in an atmosphere model. - + m The effective radius of a size distribution of particles, such as aerosols, cloud droplets or ice crystals, is the area weighted mean radius of particle size. It is calculated as the ratio of the third to the second moment of the particle size distribution. Convective cloud is that produced by the convection schemes in an atmosphere model. - + m The effective radius of a size distribution of particles, such as aerosols, cloud droplets or ice crystals, is the area weighted mean radius of particle size. 
It is calculated as the ratio of the third to the second moment of the particle size distribution. In an atmosphere model, stratiform cloud is that produced by large-scale convergence (not the convection schemes). - + m The effective radius of a size distribution of particles, such as aerosols, cloud droplets or ice crystals, is the area weighted mean radius of particle size. It is calculated as the ratio of the third to the second moment of the particle size distribution. In an atmosphere model, stratiform cloud is that produced by large-scale convergence (not the convection schemes). - + m - The effective radius of a size distribution of particles, such as aerosols, cloud droplets or ice crystals, is the area weighted mean radius of particle size. It is calculated as the ratio of the third to the second moment of the particle size distribution. In an atmosphere model, stratiform cloud is that produced by large-scale convergence (not the convection schemes). + The effective radius of a size distribution of particles, such as aerosols, cloud droplets or ice crystals, is the area weighted mean radius of particle size. It is calculated as the ratio of the third to the second moment of the particle size distribution. In an atmosphere model, stratiform cloud is that produced by large-scale convergence (not the convection schemes). "Cloud liquid water" refers to the liquid phase of cloud water. A diameter of 0.2 mm has been suggested as an upper limit to the size of drops that shall be regarded as cloud drops; larger drops fall rapidly enough so that only very strong updrafts can sustain them. Any such division is somewhat arbitrary, and active cumulus clouds sometimes contain cloud drops much larger than this. Reference: AMS Glossary http://glossary.ametsoc.org/wiki/Cloud_drop. - + m - The effective radius of a size distribution of particles, such as aerosols, cloud droplets or ice crystals, is the area weighted mean radius of particle size. It is calculated as the ratio of the third to the second moment of the particle size distribution. The phrase "stratiform_liquid_water_cloud_top" refers to the top of the highest stratiform liquid water cloud. In an atmosphere model, stratiform cloud is that produced by large-scale convergence (not the convection schemes). + The effective radius of a size distribution of particles, such as aerosols, cloud droplets or ice crystals, is the area weighted mean radius of particle size. It is calculated as the ratio of the third to the second moment of the particle size distribution. The phrase "stratiform_liquid_water_cloud_top" refers to the top of the highest stratiform liquid water cloud. In an atmosphere model, stratiform cloud is that produced by large-scale convergence (not the convection schemes). "Cloud liquid water" refers to the liquid phase of cloud water. A diameter of 0.2 mm has been suggested as an upper limit to the size of drops that shall be regarded as cloud drops; larger drops fall rapidly enough so that only very strong updrafts can sustain them. Any such division is somewhat arbitrary, and active cumulus clouds sometimes contain cloud drops much larger than this. Reference: AMS Glossary http://glossary.ametsoc.org/wiki/Cloud_drop. - + m The effective radius of a size distribution of particles, such as aerosols, cloud droplets or ice crystals, is the area weighted mean radius of particle size. It is calculated as the ratio of the third to the second moment of the particle size distribution. 
In an atmosphere model, stratiform cloud is that produced by large-scale convergence (not the convection schemes). - + m The effective radius of a size distribution of particles, such as aerosols, cloud droplets or ice crystals, is the area weighted mean radius of particle size. It is calculated as the ratio of the third to the second moment of the particle size distribution. In an atmosphere model, stratiform cloud is that produced by large-scale convergence (not the convection schemes). - + m - The diameter of an aerosol particle as selected by its electrical mobility. + The diameter of an aerosol particle as selected by its electrical mobility. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. "Ambient_aerosol" means that the aerosol is measured or modelled at the ambient state of pressure, temperature and relative humidity that exists in its immediate environment. "Ambient aerosol particles" are aerosol particles that have taken up ambient water through hygroscopic growth. The extent of hygroscopic growth depends on the relative humidity and the composition of the particles. To specify the relative humidity and temperature at which the quantity described by the standard name applies, provide scalar coordinate variables with standard names of "relative_humidity" and "air_temperature". + + + + 1 + + + Isotopic enrichment of 13C, often called delta 13C, is a measure of the ratio of stable isotopes 13C:12C. It is a parameterisation of the 13C/12C isotopic ratio in the sample with respect to the isotopic ratio in a reference standard (in this case Vienna Pee Dee Belemnite). It is computed using the formula (((13C/12C)sample / (13C/12C)standard) - 1) * 1000. Particulate means suspended solids of all sizes. + + + + 1e-3 + + + Isotopic enrichment of 14C, often called d14C or delta14C (lower case delta), is used to calculate the fossil fuel contribution to atmospheric carbon dioxide using isotopic ratios of carbon. It is a parameterisation of the 14C/12C isotopic ratio in the sample with respect to the isotopic ratio in a reference standard. It is computed using the formula (((14C/12C)sample / (14C/12C)standard) - 1) * 1000. The quantity called D14C, or Delta14C (upper case delta) is d14C corrected for isotopic fractionation using the 13C/12C ratio as follows: D14C = d14C - 2(dC13 + 25)(1+d14C/1000). If the sample is enriched in 14C relative to the standard, then the data value is positive. Reference: Stuiver, M. and H.A. Polach, 1977, Discussion reporting of 14C data, Radiocarbon, Volume 19, No. 3, 355-363, doi: 10.1017/S0033822200003672. The reference standard used in the calculation of delta14C should be specified by attaching a long_name attribute to the data variable. "C" means the element carbon and "14C" is the radioactive isotope "carbon-14", having six protons and eight neutrons and used in radiocarbon dating. + + + + 1 + + + Isotopic enrichment of 15N, often called delta 15N, is a measure of the ratio of stable isotopes 15N:14N. It is a parameterisation of the 15N/14N isotopic ratio in the sample with respect to the isotopic ratio in a reference standard (in this case atmospheric nitrogen). It is computed using the formula (((15N/14N)sample / (15N/14N)standard) - 1) * 1000. Particulate means suspended solids of all sizes. @@ -3747,13 +4186,6 @@ totals are summed to obtain the index. Altitude is the (geometric) height above the geoid, which is the reference geopotential surface. 
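The geoid is similar to mean sea level. The equilibrium line is the locus of points on a land ice surface at which ice accumulation balances ice ablation over the year.

The three isotope-enrichment entries above share one delta formula, delta = ((R_sample / R_standard) - 1) * 1000, and the 14C entry adds the fractionation correction D14C = d14C - 2(dC13 + 25)(1 + d14C/1000). A minimal sketch of both calculations, with hypothetical isotope ratios:

```python
# Sketch of the delta formulas quoted in the isotope entries above.
def delta_per_mil(r_sample, r_standard):
    """Generic delta (e.g. delta 13C, d14C, delta 15N): ((Rs/Rstd) - 1) * 1000."""
    return (r_sample / r_standard - 1.0) * 1000.0

def big_delta_14c(d14c, d13c):
    """D14C = d14C - 2*(d13C + 25)*(1 + d14C/1000), per Stuiver & Polach (1977)."""
    return d14c - 2.0 * (d13c + 25.0) * (1.0 + d14c / 1000.0)

d14c = delta_per_mil(1.180e-12, 1.176e-12)  # hypothetical 14C/12C ratios
print(big_delta_14c(d14c, d13c=-8.0))       # fractionation-corrected value
```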
- - K - - - Potential temperature is the temperature a parcel of air or sea water would have if moved adiabatically to sea level pressure. - - Pa @@ -3768,13 +4200,6 @@ totals are summed to obtain the index. "Equivalent reflectivity factor" is the radar reflectivity factor that is calculated from the measured radar return power assuming the target is composed of liquid water droplets whose diameter is less than one tenth of the radar wavelength, i.e., treating the droplets as Rayleigh scatterers. The actual radar reflectivity factor would depend on the size distribution and composition of the particles within the target volume and these are often unknown. - - K - - - - - m 10 @@ -3786,7 +4211,7 @@ totals are summed to obtain the index. K m2 kg-1 s-1 vorpot - + The Ertel potential vorticity is the scalar product of the atmospheric absolute vorticity vector and the gradient of potential temperature. It is a conserved quantity in the absence of friction and heat sources [AMS Glossary, http://glossary.ametsoc.org/wiki/Ertel_potential_vorticity]. A frequently used simplification of the general Ertel potential vorticity considers the Earth rotation vector to have only a vertical component. Then, only the vertical contribution of the scalar product is calculated. It is strongly recommended that a variable with this standard name should have the attribute units_metadata="temperature: difference", meaning that it refers to temperature differences and implying that the origin of the temperature scale is irrelevant, because it is essential to know whether a temperature is on-scale or a difference in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). @@ -3821,7 +4246,14 @@ totals are summed to obtain the index. K - The overall temperature of a fire area due to contributions from smoldering and flaming biomass. A data variable containing the area affected by fire should be given the standard name fire_area. + The overall temperature of a fire area due to contributions from smoldering and flaming biomass. A data variable containing the area affected by fire should be given the standard name fire_area. It is strongly recommended that a variable with this standard name should have a units_metadata attribute, with one of the values "on-scale" or "difference", whichever is appropriate for the data, because it is essential to know whether the temperature is on-scale (meaning relative to the origin of the scale indicated by the units) or refers to temperature differences (implying that the origin of the temperature scale is irrelevant), in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). + + + + 1 + + + A quality flag that reports the result of the Flat Line test, which checks for consecutively repeated values within a tolerance. The linkage between the data variable and this variable is achieved using the ancillary_variables attribute. There are standard names for other specific quality tests which take the form of X_quality_flag. Quality information that does not match any of the specific quantities should be given the more general standard name of quality_flag.
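The Ertel potential vorticity entry above mentions the frequently used simplification that keeps only the vertical contribution of the scalar product. One common isobaric form of that simplification is PV = -g (f + zeta) dtheta/dp; the finite-difference sketch below is an illustration of that form, not a prescription from the table:

```python
# Sketch: vertical-contribution-only Ertel potential vorticity on pressure
# levels, PV = -g * (f + zeta) * dtheta/dp. Sample values are hypothetical.
G = 9.80665  # m s-2

def simplified_ertel_pv(f_coriolis, zeta_rel, theta_upper, theta_lower,
                        p_upper, p_lower):
    """PV in K m2 kg-1 s-1 (the canonical units of the entry above)."""
    dtheta_dp = (theta_upper - theta_lower) / (p_upper - p_lower)
    return -G * (f_coriolis + zeta_rel) * dtheta_dp

# Mid-latitude example: f ~ 1e-4 s-1, zeta ~ 5e-5 s-1, theta increasing upward.
print(simplified_ertel_pv(1e-4, 5e-5, 330.0, 320.0, 40000.0, 50000.0))
```

@@ -3835,7 +4267,7 @@ totals are summed to obtain the index. 1 - "X_area_fraction" means the fraction of horizontal area occupied by X. 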
A "floating ice shelf", sometimes called a "floating ice sheet", indicates where an ice sheet extending from a land area flows over sea water. + "Area fraction" is the fraction of a grid cell's horizontal area that has some characteristic of interest. It is evaluated as the area of interest divided by the grid cell area. It may be expressed as a fraction, a percentage, or any other dimensionless representation of a fraction. A "floating ice shelf", sometimes called a "floating ice sheet", indicates where an ice sheet extending from a land area flows over sea water. @@ -3870,7 +4302,7 @@ totals are summed to obtain the index. 1 - Fog means water droplets or minute ice crystals close to the surface which reduce visibility in air to less than 1000m. "X_area_fraction" means the fraction of horizontal area occupied by X. + "Area fraction" is the fraction of a grid cell's horizontal area that has some characteristic of interest. It is evaluated as the area of interest divided by the grid cell area. It may be expressed as a fraction, a percentage, or any other dimensionless representation of a fraction. Fog means water droplets or minute ice crystals close to the surface which reduce visibility in air to less than 1000m. @@ -3905,7 +4337,7 @@ totals are summed to obtain the index. 1 - "X_area_fraction" means the fraction of horizontal area occupied by X. Sea ice area fraction is area of the sea surface occupied by sea ice. The area threshold value must be specified by supplying a coordinate variable or scalar coordinate variable with the standard name of sea_ice_area_fraction. "Sea ice" means all ice floating in the sea which has formed from freezing sea water, rather than by other processes such as calving of land ice to form icebergs. + "Fraction of time" is the fraction of a time period defined by the bounds of the time coordinate variable for which a characteristic of interest exists. It may be expressed as a fraction, a percentage, or any other dimensionless representation of a fraction. "Area fraction" is the fraction of a grid cell's horizontal area that has some characteristic of interest. It is evaluated as the area of interest divided by the grid cell area. Sea ice area fraction is area of the sea surface occupied by sea ice. The area threshold value must be specified by supplying a coordinate variable or scalar coordinate variable with the standard name of sea_ice_area_fraction. "Sea ice" means all ice floating in the sea which has formed from freezing sea water, rather than by other processes such as calving of land ice to form icebergs. @@ -3929,6 +4361,13 @@ totals are summed to obtain the index. A lightning flash is a compound event, usually consisting of several discharges. Frequency is the number of oscillations of a wave, or the number of occurrences of an event, per unit time. + + kg m-3 + + + The density of the soil in its naturally frozen condition. Also known as frozen bulk density. The density of a substance is its mass per unit volume. + + kg m-2 @@ -3936,6 +4375,20 @@ totals are summed to obtain the index. "frozen_water" means ice. "Content" indicates a quantity per unit area. "Layer" means any layer with upper and lower boundaries that have constant values in some vertical coordinate. There must be a vertical coordinate variable indicating the extent of the layer(s). If the layers are model layers, the vertical coordinate can be model_level_number, but it is recommended to specify a physical coordinate (in a scalar or auxiliary coordinate variable) as well. 
Quantities defined for a soil layer must have a vertical coordinate variable with boundaries indicating the extent of the layer(s). + + Pa + + + The fugacity is the measured pressure (or partial pressure) of a real gas corrected for the intermolecular forces of that gas, which allows that corrected quantity to be treated like the pressure of an ideal gas in the ideal gas equation PV = nRT. The partial pressure of a dissolved gas in sea water is the partial pressure in air with which it would be in equilibrium. The partial pressure of a gaseous constituent of air is the pressure that it would exert if all other gaseous constituents were removed, assuming the volume, the temperature, and its number of moles remain unchanged. The chemical formula for carbon dioxide is CO2. + + + + 1 + + + A quality flag that reports the result of the Timing/Gap test, which checks that data have been received within the expected time window and have the correct time stamp. The linkage between the data variable and this variable is achieved using the ancillary_variables attribute. There are standard names for other specific quality tests which take the form of X_quality_flag. Quality information that does not match any of the specific quantities should be given the more general standard name of quality_flag. + + m @@ -4069,6 +4522,13 @@ totals are summed to obtain the index. Longitude is positive eastward; its units of degree_east (or equivalent) indicate this explicitly. In a latitude-longitude system defined with respect to a rotated North Pole, the standard name of grid_longitude should be used instead of longitude. Grid longitude is positive in the grid-eastward direction, but its units should be plain degree. + + mol m-3 s-1 + + + "Gross mole production" means the rate of creation of biomass per unit volume with no correction for respiration loss in terms of quantity of matter (moles). The phrase "expressed_as" is used in the construction "A_expressed_as_B", where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. "Prokaryotes" means all Bacteria and Archaea excluding photosynthetic cyanobacteria such as Synechococcus and Prochlorococcus or other separately named components of the prokaryotic population. + + kg m-2 s-1 @@ -4090,18 +4550,32 @@ totals are summed to obtain the index. "Production of carbon" means the production of biomass expressed as the mass of carbon which it contains. Gross primary production is the rate of synthesis of biomass from inorganic precursors by autotrophs ("producers"), for example, photosynthesis in plants or phytoplankton. The producers also respire some of this biomass and the difference is "net_primary_production". "Productivity" means production per unit area. The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. + + kg m-3 s-1 + + + "Gross production" means the rate of creation of biomass per unit volume with no correction for respiration. The phrase "expressed_as" is used in the construction "A_expressed_as_B", where B is a chemical constituent of A. 
It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. "Prokaryotes" means all Bacteria and Archaea excluding photosynthetic cyanobacteria such as Synechococcus and Prochlorococcus or other separately named components of the prokaryotic population. + + + + 1 + + + A quality flag that reports the result of the Gross Range test, which checks that values are within reasonable range bounds. The linkage between the data variable and this variable is achieved using the ancillary_variables attribute. There are standard names for other specific quality tests which take the form of X_quality_flag. Quality information that does not match any of the specific quantities should be given the more general standard name of quality_flag. + + s-1 - The "gross rate of decrease in area fraction" is the fraction of a grid cell that transitions from a given area type per unit time, for example, as a result of land use changes. The quantity described by this standard name is a gross decrease because it includes only land where the use transitions away from the given area type and excludes land that transitions to that area type during the same period. The area type should be specified using a coordinate or scalar coordinate variable with standard name area_type. There is also a standard name for gross_rate_of_increase_in_area_fraction. + The "gross rate of decrease in area fraction" is the fraction of a grid cell that transitions from a given area type per unit time, for example, as a result of land use changes. The quantity described by this standard name is a gross decrease because it includes only land where the use transitions away from the given area type and excludes land that transitions to that area type during the same period. The area type should be specified using a coordinate or scalar coordinate variable with standard name area_type. There is also a standard name for gross_rate_of_increase_in_area_fraction. "Area fraction" is the fraction of a grid cell's horizontal area that has some characteristic of interest. It is evaluated as the area of interest divided by the grid cell area. s-1 - The "rate of increase in area fraction" is the fraction of a grid cell that transitions to a given area type per unit time, for example, as a result of land use changes. The quantity described by this standard name is a gross increase because it includes only land where the use transitions to the given area type and excludes land that transitions away from that area type during the same period. The area type should be specified using a coordinate or scalar coordinate variable with standard name area_type. There is also a standard name for gross_rate_of_decrease_in_area_fraction. + The "rate of increase in area fraction" is the fraction of a grid cell that transitions to a given area type per unit time, for example, as a result of land use changes. The quantity described by this standard name is a gross increase because it includes only land where the use transitions to the given area type and excludes land that transitions away from that area type during the same period. The area type should be specified using a coordinate or scalar coordinate variable with standard name area_type. There is also a standard name for gross_rate_of_decrease_in_area_fraction. "Area fraction" is the fraction of a grid cell's horizontal area that has some characteristic of interest. 
It is evaluated as the area of interest divided by the grid cell area. @@ -4115,7 +4589,7 @@ totals are summed to obtain the index. 1 - "X_area_fraction" means the fraction of horizontal area occupied by X. "Grounded ice sheet" indicates where the ice sheet rests over bedrock and is thus grounded. It excludes ice-caps, glaciers and floating ice shelves. + "Area fraction" is the fraction of a grid cell's horizontal area that has some characteristic of interest. It is evaluated as the area of interest divided by the grid cell area. It may be expressed as a fraction, a percentage, or any other dimensionless representation of a fraction. "Grounded ice sheet" indicates where the ice sheet rests over bedrock and is thus grounded. It excludes ice-caps, glaciers and floating ice shelves. @@ -4125,6 +4599,20 @@ totals are summed to obtain the index. The ground_level_altitude is the geometric height of the upper boundary of the solid Earth above the geoid, which is the reference geopotential surface. The geoid is similar to mean sea level. + + degree + + + The slope angle is the angle (in degrees) measured between the ground (earth) surface plane and a flat, horizontal surface. + + + + degree + + + Commonly known as aspect, it is the azimuth (in degrees) of a terrain slope, taken as the direction with the greatest downslope change in elevation on the ground (earth) surface. The direction is a bearing in the usual geographical sense, measured positive clockwise from due north. + + 1 @@ -4139,11 +4627,11 @@ totals are summed to obtain the index. Diatoms are phytoplankton with an external skeleton made of silica. Phytoplankton are algae that grow where there is sufficient light to support photosynthesis. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Irradiance" means the power per unit area (called radiative flux in other standard names), the area being normal to the direction of flow of the radiant energy. Solar irradiance is essential to the photosynthesis reaction and its presence promotes the growth of phytoplankton populations. "Growth limitation due to solar irradiance" means the ratio of the growth rate of a species population in the environment (where the amount of sunlight reaching a location may be limited) to the theoretical growth rate if there were no such limit on solar irradiance. - + 1 - In ocean modelling, diazotrophs are phytoplankton of the phylum cyanobacteria distinct from other phytoplankton groups in their ability to fix nitrogen gas in addition to nitrate and ammonium. Phytoplankton are algae that grow where there is sufficient light to support photosynthesis. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Irradiance" means the power per unit area (called radiative flux in other standard names), the area being normal to the direction of flow of the radiant energy. Solar irradiance is essential to the photosynthesis reaction and its presence promotes the growth of phytoplankton populations. "Growth limitation due to solar irradiance" means the ratio of the growth rate of a species population in the environment (where the amount of sunlight reaching a location may be limited) to the theoretical growth rate if there were no such limit on solar irradiance. 
+ "Growth limitation due to solar irradiance" means the ratio of the growth rate of a biological population in the environment (where the amount of sunlight reaching a location may be limited) to the theoretical growth rate if there were no such limit on solar irradiance. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Irradiance" means the power per unit area (called radiative flux in other standard names), the area being normal to the direction of flow of the radiant energy. Solar irradiance is essential to the photosynthesis reaction and its presence promotes the growth of phytoplankton populations. Phytoplankton are algae that grow where there is sufficient light to support photosynthesis. Diazotrophic phytoplankton are phytoplankton (predominantly from Phylum Cyanobacteria) that are able to fix molecular nitrogen (gas or solute) in addition to nitrate and ammonium. @@ -4195,11 +4683,11 @@ totals are summed to obtain the index. A period is an interval of time, or the time-period of an oscillation. - + W m-2 hfcorr - Flux correction is also called "flux adjustment". A positive flux correction is downward i.e. added to the ocean. In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. + A positive flux adjustment is downward i.e. added to the ocean. In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. @@ -4237,6 +4725,13 @@ totals are summed to obtain the index. In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Snow thermodynamics" refers to the addition or subtraction of mass due to surface and basal fluxes, i.e., due to melting, sublimation and fusion. + + K + + + Air temperature is the bulk temperature of the air, not the surface (skin) temperature. The quantity with standard name heat_index_of_air_temperature is the perceived air temperature when relative humidity is taken into consideration (which makes it feel hotter than the actual air temperature). Heat index is only defined when the ambient air temperature is at or above 299.817 K. References: https://www.weather.gov/safety/heat-index; WMO codes registry entry http://codes.wmo.int/grib2/codeflag/4.2/_0-0-12. It is strongly recommended that a variable with this standard name should have a units_metadata attribute, with one of the values "on-scale" or "difference", whichever is appropriate for the data, because it is essential to know whether the temperature is on-scale (meaning relative to the origin of the scale indicated by the units) or refers to temperature differences (implying that the origin of the temperature scale is irrevelant), in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). + + m @@ -4297,10 +4792,10 @@ totals are summed to obtain the index. 
1 - High type clouds are: Cirrus, Cirrostratus, Cirrocumulus. "X_area_fraction" means the fraction of horizontal area occupied by X. Cloud area fraction is also called "cloud amount" and "cloud cover". X_type_cloud_area_fraction is generally determined on the basis of cloud type, though Numerical Weather Prediction (NWP) models often calculate them based on the vertical location of the cloud. + "Area fraction" is the fraction of a grid cell's horizontal area that has some characteristic of interest. It is evaluated as the area of interest divided by the grid cell area. It may be expressed as a fraction, a percentage, or any other dimensionless representation of a fraction. High type clouds are: Cirrus, Cirrostratus, Cirrocumulus. X_type_cloud_area_fraction is generally determined on the basis of cloud type, though Numerical Weather Prediction (NWP) models often calculate them based on the vertical location of the cloud. For the cloud area fraction between specified levels in the atmosphere, standard names including "cloud_area_fraction_in_atmosphere_layer" are used. Standard names referring only to "cloud_area_fraction" should be used for quantities for the whole atmosphere column. Cloud area fraction is also called "cloud amount" and "cloud cover". - + 1 @@ -4339,14 +4834,21 @@ totals are summed to obtain the index. 1 - "X_area_fraction" means the fraction of horizontal area occupied by X. Cloud area fraction is also called "cloud amount" and "cloud cover". The cloud area fraction is for the whole atmosphere column, as seen from the surface or the top of the atmosphere. The cloud area fraction in a layer of the atmosphere has the standard name cloud_area_fraction_in_atmosphere_layer. + "Area fraction" is the fraction of a grid cell's horizontal area that has some characteristic of interest. It is evaluated as the area of interest divided by the grid cell area. It may be expressed as a fraction, a percentage, or any other dimensionless representation of a fraction. The cloud area fraction is for the whole atmosphere column, as seen from the surface or the top of the atmosphere. For the cloud area fraction between specified levels in the atmosphere, standard names including "cloud_area_fraction_in_atmosphere_layer" are used. Standard names also exist for high, medium and low cloud types. Cloud area fraction is also called "cloud amount" and "cloud cover". 1 - "Layer" means any layer with upper and lower boundaries that have constant values in some vertical coordinate. There must be a vertical coordinate variable indicating the extent of the layer(s). If the layers are model layers, the vertical coordinate can be "model_level_number", but it is recommended to specify a physical coordinate (in a scalar or auxiliary coordinate variable) as well. "X_area_fraction" means the fraction of horizontal area occupied by X. Cloud area fraction is also called "cloud amount" and "cloud cover". + "Area fraction" is the fraction of a grid cell's horizontal area that has some characteristic of interest. It is evaluated as the area of interest divided by the grid cell area. It may be expressed as a fraction, a percentage, or any other dimensionless representation of a fraction. "Layer" means any layer with upper and lower boundaries that have constant values in some vertical coordinate. There must be a vertical coordinate variable indicating the extent of the layer(s). 
If the layers are model layers, the vertical coordinate can be "model_level_number", but it is recommended to specify a physical coordinate (in a scalar or auxiliary coordinate variable) as well. Standard names also exist for high, medium and low cloud types. Standard names referring only to "cloud_area_fraction" should be used for quantities for the whole atmosphere column. Cloud area fraction is also called "cloud amount" and "cloud cover". + + + + 1 + + + ice_volume_in_frozen_ground_in_excess_of_pore_volume_in_unfrozen_ground_expressed_as_fraction_of_frozen_ground_volume represents the fractional amount of "excess ice" in frozen ground. Excess ice is the volume of ice in the ground which exceeds the total pore volume that the ground would have under natural unfrozen conditions. Due to the presence of ground ice, the total water content of a frozen soil may exceed that corresponding to its normally consolidated state when unfrozen. As a result, upon thawing, a soil containing excess ice will settle under its own weight until it attains its consolidated state. Reference: van Everdingen, R. O. editor 1998: Multi-language glossary of permafrost and related ground ice terms. International Permafrost Association. @@ -4356,6 +4858,13 @@ totals are summed to obtain the index. "Water" means water in all phases. "River" refers to water in the fluvial system (stream and floodplain). + + m s-1 + + + Sea water velocity is a vector quantity that is the speed at which water travels in a specified direction. The "indicative error" is an estimate of the quality of a sea water velocity profile measured using an ADCP (acoustic doppler current profiler). It is determined by the difference between the vertical velocity calculated from two 3-beam solutions. The parameter is frequently referred to as the "error velocity". + + @@ -4363,25 +4872,25 @@ totals are summed to obtain the index. An auxiliary coordinate variable with a standard name of institution contains string values which specify where the original data, with which the coordinate variable is associated, were produced. The use of institution as the standard name for an auxiliary coordinate variable permits the aggregation of data from multiple institutions within a single data file. - + kg degree_C m-2 - The phrase "integral_wrt_X_of_Y" means int Y dX. To specify the limits of the integral the data variable should have an axis for X and associated coordinate bounds. If no axis for X is associated with the data variable, or no coordinate bounds are specified, it is assumed that the integral is calculated over the entire vertical extent of the medium, e.g, if the medium is air the integral is assumed to be calculated over the full depth of the atmosphere. The phrase "wrt" means "with respect to". Depth is the vertical distance below the surface. The phrase "product_of_X_and_Y" means X*Y. Sea water density is the in-situ density (not the potential density). For Boussinesq models, density is the constant Boussinesq reference density, a quantity which has the standard name reference_sea_water_density_for_boussinesq_approximation. Conservative Temperature is defined as part of the Thermodynamic Equation of Seawater 2010 (TEOS-10) which was adopted in 2010 by the International Oceanographic Commission (IOC). Conservative Temperature is specific potential enthalpy (which has the standard name sea_water_specific_potential_enthalpy) divided by a fixed value of the specific heat capacity of sea water, namely cp_0 = 3991.86795711963 J kg-1 K-1. 
Conservative Temperature is a more accurate measure of the "heat content" of sea water, by a factor of one hundred, than is potential temperature. Because of this, it can be regarded as being proportional to the heat content of sea water per unit mass. Reference: www.teos-10.org; McDougall, 2003 doi: 10.1175/1520-0485(2003)033<0945:PEACOV>2.0.CO;2. + The phrase "integral_wrt_X_of_Y" means int Y dX. To specify the limits of the integral the data variable should have an axis for X and associated coordinate bounds. If no axis for X is associated with the data variable, or no coordinate bounds are specified, it is assumed that the integral is calculated over the entire vertical extent of the medium, e.g, if the medium is air the integral is assumed to be calculated over the full depth of the atmosphere. The phrase "wrt" means "with respect to". Depth is the vertical distance below the surface. The phrase "product_of_X_and_Y" means X*Y. Conservative Temperature is defined as part of the Thermodynamic Equation of Seawater 2010 (TEOS-10) which was adopted in 2010 by the International Oceanographic Commission (IOC). Conservative Temperature is specific potential enthalpy (which has the standard name sea_water_specific_potential_enthalpy) divided by a fixed value of the specific heat capacity of sea water, namely cp_0 = 3991.86795711963 J kg-1 K-1. Conservative Temperature is a more accurate measure of the "heat content" of sea water, by a factor of one hundred, than is potential temperature. Because of this, it can be regarded as being proportional to the heat content of sea water per unit mass. Reference: www.teos-10.org; McDougall, 2003 doi: 10.1175/1520-0485(2003)033<0945:PEACOV>2.0.CO;2. Sea water density is the in-situ density (not the potential density). For Boussinesq models, density is the constant Boussinesq reference density, a quantity which has the standard name reference_sea_water_density_for_boussinesq_approximation. It is strongly recommended that a variable with this standard name should have a units_metadata attribute, with one of the values "on-scale" or "difference", whichever is appropriate for the data, because it is essential to know whether the temperature is on-scale (meaning relative to the origin of the scale indicated by the units) or refers to temperature differences (implying that the origin of the temperature scale is irrelevant), in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). - + kg degree_C m-2 - The phrase "integral_wrt_X_of_Y" means int Y dX. To specify the limits of the integral the data variable should have an axis for X and associated coordinate bounds. If no axis for X is associated with the data variable, or no coordinate bounds are specified, it is assumed that the integral is calculated over the entire vertical extent of the medium, e.g, if the medium is air the integral is assumed to be calculated over the full depth of the atmosphere. The phrase "wrt" means "with respect to". The phrase "product_of_X_and_Y" means X*Y. Depth is the vertical distance below the surface. Sea water density is the in-situ density (not the potential density). For Boussinesq models, density is the constant Boussinesq reference density, a quantity which has the standard name reference_sea_water_density_for_boussinesq_approximation. Potential temperature is the temperature a parcel of air or sea water would have if moved adiabatically to sea level pressure. 
+ The phrase "integral_wrt_X_of_Y" means int Y dX. To specify the limits of the integral the data variable should have an axis for X and associated coordinate bounds. If no axis for X is associated with the data variable, or no coordinate bounds are specified, it is assumed that the integral is calculated over the entire vertical extent of the medium, e.g, if the medium is air the integral is assumed to be calculated over the full depth of the atmosphere. The phrase "wrt" means "with respect to". The phrase "product_of_X_and_Y" means X*Y. Depth is the vertical distance below the surface. Potential temperature is the temperature a parcel of air or sea water would have if moved adiabatically to sea level pressure. Sea water density is the in-situ density (not the potential density). For Boussinesq models, density is the constant Boussinesq reference density, a quantity which has the standard name reference_sea_water_density_for_boussinesq_approximation. It is strongly recommended that a variable with this standard name should have a units_metadata attribute, with one of the values "on-scale" or "difference", whichever is appropriate for the data, because it is essential to know whether the temperature is on-scale (meaning relative to the origin of the scale indicated by the units) or refers to temperature differences (implying that the origin of the temperature scale is irrevelant), in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). - + 1e-3 kg m-2 - The phrase "integral_wrt_X_of_Y" means int Y dX. To specify the limits of the integral the data variable should have an axis for X and associated coordinate bounds. If no axis for X is associated with the data variable, or no coordinate bounds are specified, it is assumed that the integral is calculated over the entire vertical extent of the medium, e.g, if the medium is air the integral is assumed to be calculated over the full depth of the atmosphere. The phrase "wrt" means "with respect to". The phrase "product_of_X_and_Y" means X*Y. Depth is the vertical distance below the surface. Sea water density is the in-situ density (not the potential density). For Boussinesq models, density is the constant Boussinesq reference density, a quantity which has the standard name reference_sea_water_density_for_boussinesq_approximation. Sea water salinity is the salt content of sea water, often on the Practical Salinity Scale of 1978. However, the unqualified term 'salinity' is generic and does not necessarily imply any particular method of calculation. The units of salinity are dimensionless and the units attribute should normally be given as 1e-3 or 0.001 i.e. parts per thousand. There are standard names for the more precisely defined salinity quantities sea_water_knudsen_salinity, S_K (used for salinity observations between 1901 and 1966), sea_water_cox_salinity, S_C (used for salinity observations between 1967 and 1977), sea_water_practical_salinity, S_P (used for salinity observations from 1978 to the present day), sea_water_absolute_salinity, S_A, sea_water_preformed_salinity, S_*, and sea_water_reference_salinity. Practical Salinity is reported on the Practical Salinity Scale of 1978 (PSS-78), and is usually based on the electrical conductivity of sea water in observations since the 1960s. 
Conversion of data between the observed scales follows S_P = (S_K - 0.03) * (1.80655 / 1.805) and S_P = S_C; however, the accuracy of the latter is dependent on whether chlorinity or conductivity was used to determine the S_C value, with this inconsistency driving the development of PSS-78. The more precise standard names should be used where appropriate for both modelled and observed salinities. In particular, the use of sea_water_salinity to describe salinity observations made from 1978 onwards is now deprecated in favor of the term sea_water_practical_salinity which is the salinity quantity stored by national data centers for post-1978 observations. The only exception to this is where the observed salinities are definitely known not to be recorded on the Practical Salinity Scale. Practical salinity units are dimensionless. The unit "parts per thousand" was used for sea_water_knudsen_salinity and sea_water_cox_salinity. + The phrase "integral_wrt_X_of_Y" means int Y dX. To specify the limits of the integral the data variable should have an axis for X and associated coordinate bounds. If no axis for X is associated with the data variable, or no coordinate bounds are specified, it is assumed that the integral is calculated over the entire vertical extent of the medium, e.g., if the medium is air the integral is assumed to be calculated over the full depth of the atmosphere. The phrase "wrt" means "with respect to". The phrase "product_of_X_and_Y" means X*Y. Depth is the vertical distance below the surface. Sea water salinity is the salt content of sea water, often on the Practical Salinity Scale of 1978. However, the unqualified term 'salinity' is generic and does not necessarily imply any particular method of calculation. The units of salinity are dimensionless and the units attribute should normally be given as 1e-3 or 0.001 i.e. parts per thousand. There are standard names for the more precisely defined salinity quantities sea_water_knudsen_salinity, S_K (used for salinity observations between 1901 and 1966), sea_water_cox_salinity, S_C (used for salinity observations between 1967 and 1977), sea_water_practical_salinity, S_P (used for salinity observations from 1978 to the present day), sea_water_absolute_salinity, S_A, sea_water_preformed_salinity, S_*, and sea_water_reference_salinity. Practical Salinity is reported on the Practical Salinity Scale of 1978 (PSS-78), and is usually based on the electrical conductivity of sea water in observations since the 1960s. Conversion of data between the observed scales follows S_P = (S_K - 0.03) * (1.80655 / 1.805) and S_P = S_C; however, the accuracy of the latter is dependent on whether chlorinity or conductivity was used to determine the S_C value, with this inconsistency driving the development of PSS-78. The more precise standard names should be used where appropriate for both modelled and observed salinities. In particular, the use of sea_water_salinity to describe salinity observations made from 1978 onwards is now deprecated in favor of the term sea_water_practical_salinity which is the salinity quantity stored by national data centers for post-1978 observations. The only exception to this is where the observed salinities are definitely known not to be recorded on the Practical Salinity Scale. Practical salinity units are dimensionless. The unit "parts per thousand" was used for sea_water_knudsen_salinity and sea_water_cox_salinity. Sea water density is the in-situ density (not the potential density).
For Boussinesq models, density is the constant Boussinesq reference density, a quantity which has the standard name reference_sea_water_density_for_boussinesq_approximation. @@ -4395,7 +4904,7 @@ totals are summed to obtain the index. K m - The phrase "integral_wrt_X_of_Y" means int Y dX. To specify the limits of the integral the data variable should have an axis for X and associated coordinate bounds. If no axis for X is associated with the data variable, or no coordinate bounds are specified, it is assumed that the integral is calculated over the entire vertical extent of the medium, e.g., if the medium is air the integral is assumed to be calculated over the full depth of the atmosphere. "wrt" means with respect to. Depth is the vertical distance below the surface. Sea water temperature is the in situ temperature of the sea water. For observed data, depending on the period during which the observation was made, the measured in situ temperature was recorded against standard "scales". These historical scales include the International Practical Temperature Scale of 1948 (IPTS-48; 1948-1967), the International Practical Temperature Scale of 1968 (IPTS-68, Barber, 1969; 1968-1989) and the International Temperature Scale of 1990 (ITS-90, Saunders 1990; 1990 onwards). Conversion of data between these scales follows t68 = t48 - (4.4 x 10^-6) * t48 * (100 - t48); t90 = 0.99976 * t68. Observations made prior to 1948 (IPTS-48) have not been documented and therefore a conversion cannot be certain. Differences between t90 and t68 can be up to 0.01 at temperatures of 40 C and above; differences of 0.002-0.007 occur across the standard range of ocean temperatures (-10 to 30 C). The International Equation of State of Seawater 1980 (EOS-80, UNESCO, 1981) and the Practical Salinity Scale (PSS-78) were both based on IPTS-68, while the Thermodynamic Equation of Seawater 2010 (TEOS-10) is based on ITS-90. References: Barber, 1969, doi: 10.1088/0026-1394/5/2/001; UNESCO, 1981; Saunders, 1990, WOCE Newsletter, 10, September 1990. + The phrase "integral_wrt_X_of_Y" means int Y dX. To specify the limits of the integral the data variable should have an axis for X and associated coordinate bounds. If no axis for X is associated with the data variable, or no coordinate bounds are specified, it is assumed that the integral is calculated over the entire vertical extent of the medium, e.g., if the medium is air the integral is assumed to be calculated over the full depth of the atmosphere. "wrt" means with respect to. Depth is the vertical distance below the surface. Sea water temperature is the in situ temperature of the sea water. For observed data, depending on the period during which the observation was made, the measured in situ temperature was recorded against standard "scales". These historical scales include the International Practical Temperature Scale of 1948 (IPTS-48; 1948-1967), the International Practical Temperature Scale of 1968 (IPTS-68, Barber, 1969; 1968-1989) and the International Temperature Scale of 1990 (ITS-90, Saunders 1990; 1990 onwards). Conversion of data between these scales follows t68 = t48 - (4.4 x 10^-6) * t48 * (100 - t48); t90 = 0.99976 * t68. Observations made prior to 1948 (IPTS-48) have not been documented and therefore a conversion cannot be certain. Differences between t90 and t68 can be up to 0.01 at temperatures of 40 C and above; differences of 0.002-0.007 occur across the standard range of ocean temperatures (-10 to 30 C).
The International Equation of State of Seawater 1980 (EOS-80, UNESCO, 1981) and the Practical Salinity Scale (PSS-78) were both based on IPTS-68, while the Thermodynamic Equation of Seawater 2010 (TEOS-10) is based on ITS-90. References: Barber, 1969, doi: 10.1088/0026-1394/5/2/001; UNESCO, 1981; Saunders, 1990, WOCE Newsletter, 10, September 1990. It is strongly recommended that a variable with this standard name should have a units_metadata attribute, with one of the values "on-scale" or "difference", whichever is appropriate for the data, because it is essential to know whether the temperature is on-scale (meaning relative to the origin of the scale indicated by the units) or refers to temperature differences (implying that the origin of the temperature scale is irrelevant), in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). @@ -4412,6 +4921,13 @@ totals are summed to obtain the index. The phrase "integral_wrt_X_of_Y" means int Y dX. To specify the limits of the integral the data variable should have an axis for X and associated coordinate bounds. If no axis for X is associated with the data variable, or no coordinate bounds are specified, it is assumed that the integral is calculated over the entire vertical extent of the medium, e.g., if the medium is air the integral is assumed to be calculated over the full depth of the atmosphere. "wrt" means with respect to. "tendency_of_X" means derivative of X with respect to time. Depth is the vertical distance below the surface. 'sea_water_alkalinity_expressed_as_mole_equivalent' is the total alkalinity equivalent concentration (including carbonate, nitrogen, silicate, and borate components). The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. + + kg m-1 s-1 + + + Eastward vertically-integrated moisture flux per unit length in latitude. "Eastward" indicates a vector component which is positive when directed eastward (negative westward). Height is the vertical distance above the surface. Wind is defined as a two-dimensional (horizontal) air velocity vector, with no vertical component. (Vertical motion in the atmosphere has the standard name "upward_air_velocity"). The phrase "product_of_X_and_Y" means X*Y. The abbreviation "wrt" means "with respect to". The phrase "integral_wrt_X_of_Y" means int Y dX. To specify the limits of the integral the data variable should have an axis for X and associated coordinate bounds. If no axis for X is associated with the data variable, or no coordinate bounds are specified, it is assumed that the integral is calculated over the entire vertical extent of the medium, e.g., if the medium is air the integral is assumed to be calculated over the full depth of the atmosphere. "Mass concentration" means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". + + m2 s-1 @@ -4419,6 +4935,13 @@ totals are summed to obtain the index. The phrase "integral_wrt_X_of_Y" means int Y dX. To specify the limits of the integral the data variable should have an axis for X and associated coordinate bounds.
If no axis for X is associated with the data variable, or no coordinate bounds are specified, it is assumed that the integral is calculated over the entire vertical extent of the medium, e.g., if the medium is air the integral is assumed to be calculated over the full depth of the atmosphere. The phrase "wrt" means "with respect to". Height is the vertical distance above the surface. The phrase "product_of_X_and_Y" means X*Y. Wind is defined as a two-dimensional (horizontal) air velocity vector, with no vertical component. (Vertical motion in the atmosphere has the standard name "upward_air_velocity".) "Eastward" indicates a vector component which is positive when directed eastward (negative westward). Specific humidity is the mass fraction of water vapor in (moist) air. + + kg m-1 s-1 + + + Northward vertically-integrated moisture flux per unit length in longitude. "Northward" indicates a vector component which is positive when directed northward (negative southward). Height is the vertical distance above the surface. Wind is defined as a two-dimensional (horizontal) air velocity vector, with no vertical component. (Vertical motion in the atmosphere has the standard name "upward_air_velocity"). The phrase "product_of_X_and_Y" means X*Y. The abbreviation "wrt" means "with respect to". The phrase "integral_wrt_X_of_Y" means int Y dX. To specify the limits of the integral the data variable should have an axis for X and associated coordinate bounds. If no axis for X is associated with the data variable, or no coordinate bounds are specified, it is assumed that the integral is calculated over the entire vertical extent of the medium, e.g., if the medium is air the integral is assumed to be calculated over the full depth of the atmosphere. "Mass concentration" means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". + + m2 s-1 @@ -4430,14 +4953,14 @@ totals are summed to obtain the index. K s - The phrase "integral_wrt_X_of_Y" means int Y dX. The data variable should have an axis for X specifying the limits of the integral as bounds. "wrt" means with respect to. Air temperature is the bulk temperature of the air, not the surface (skin) temperature. The air temperature deficit is the air temperature threshold minus the air temperature, where only positive values are included in the integral. Its integral with respect to time is often called after its units of "degree-days". The air_temperature variable, which is the data variable of the integral, should have a scalar coordinate variable or a size-one coordinate variable with the standard name of air_temperature_threshold, to indicate the threshold. + The phrase "integral_wrt_X_of_Y" means int Y dX. The data variable should have an axis for X specifying the limits of the integral as bounds. "wrt" means with respect to. Air temperature is the bulk temperature of the air, not the surface (skin) temperature. The air temperature deficit is the air temperature threshold minus the air temperature, where only positive values are included in the integral. Its integral with respect to time is often called after its units of "degree-days".
The air_temperature variable, which is the data variable of the integral, should have a scalar coordinate variable or a size-one coordinate variable with the standard name of air_temperature_threshold, to indicate the threshold. It is strongly recommended that a variable with this standard name should have the attribute units_metadata="temperature: difference", meaning that it refers to temperature differences and implying that the origin of the temperature scale is irrelevant, because it is essential to know whether a temperature is on-scale or a difference in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). K s - The phrase "integral_wrt_X_of_Y" means int Y dX. The data variable should have an axis for X specifying the limits of the integral as bounds. "wrt" means with respect to. Air temperature is the bulk temperature of the air, not the surface (skin) temperature. The air temperature excess is the air temperature minus the air temperature threshold, where only positive values are included in the integral. Its integral with respect to time is often called after its units of "degree-days". The air_temperature variable, which is the data variable of the integral, should have a scalar coordinate variable or a size-one coordinate variable with the standard name of air_temperature_threshold, to indicate the threshold. + The phrase "integral_wrt_X_of_Y" means int Y dX. The data variable should have an axis for X specifying the limits of the integral as bounds. "wrt" means with respect to. Air temperature is the bulk temperature of the air, not the surface (skin) temperature. The air temperature excess is the air temperature minus the air temperature threshold, where only positive values are included in the integral. Its integral with respect to time is often called after its units of "degree-days". The air_temperature variable, which is the data variable of the integral, should have a scalar coordinate variable or a size-one coordinate variable with the standard name of air_temperature_threshold, to indicate the threshold. It is strongly recommended that a variable with this standard name should have the attribute units_metadata="temperature: difference", meaning that it refers to temperature differences and implying that the origin of the temperature scale is irrelevant, because it is essential to know whether a temperature is on-scale or a difference in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). @@ -6992,7 +7515,7 @@ totals are summed to obtain the index. Pa s - The phrase "integral_wrt_X_of_Y" means int Y dX. The data variable should have an axis for X specifying the limits of the integral as bounds. "wrt" means with respect to. The surface called "surface" means the lower boundary of the atmosphere. "Eastward" indicates a vector component which is positive when directed eastward (negative westward). "Downward" indicates a vector component which is positive when directed downward (negative upward). "Downward eastward" indicates the ZX component of a tensor. A downward eastward stress is a downward flux of eastward momentum, which accelerates the lower medium eastward and the upper medium westward. The surface downward stress is the wind stress on the surface. + The phrase "integral_wrt_X_of_Y" means int Y dX. The data variable should have an axis for X specifying the limits of the integral as bounds.
The abbreviation "wrt" means "with respect to". The surface called "surface" means the lower boundary of the atmosphere. "Surface stress" means the shear stress (force per unit area) exerted by the wind at the surface. A downward stress is a downward flux of momentum. Over large bodies of water, wind stress can drive near-surface currents. "Downward" indicates a vector component which is positive when directed downward (negative upward). "Eastward" indicates a vector component which is positive when directed eastward (negative westward). "Downward eastward" indicates the ZX component of a tensor. A downward eastward stress is a downward flux of eastward momentum, which accelerates the lower medium eastward and the upper medium westward. @@ -7006,7 +7529,7 @@ totals are summed to obtain the index. Pa s - The phrase "integral_wrt_X_of_Y" means int Y dX. The data variable should have an axis for X specifying the limits of the integral as bounds. "wrt" means with respect to. The surface called "surface" means the lower boundary of the atmosphere. "Northward" indicates a vector component which is positive when directed northward (negative southward). "Downward" indicates a vector component which is positive when directed downward (negative upward). "Downward northward" indicates the ZY component of a tensor. A downward northward stress is a downward flux of northward momentum, which accelerates the lower medium northward and the upper medium southward. The surface downward stress is the wind stress on the surface. + The phrase "integral_wrt_X_of_Y" means int Y dX. The data variable should have an axis for X specifying the limits of the integral as bounds. The abbreviation "wrt" means "with respect to". The surface called "surface" means the lower boundary of the atmosphere. "Surface stress" means the shear stress (force per unit area) exerted by the wind at the surface. A downward stress is a downward flux of momentum. Over large bodies of water, wind stress can drive near-surface currents. "Downward" indicates a vector component which is positive when directed downward (negative upward). "Northward" indicates a vector component which is positive when directed northward (negative southward). "Downward northward" indicates the ZY component of a tensor. A downward northward stress is a downward flux of northward momentum, which accelerates the lower medium northward and the upper medium southward. @@ -7072,11 +7595,11 @@ totals are summed to obtain the index. Diatoms are phytoplankton with an external skeleton made of silica. Phytoplankton are algae that grow where there is sufficient light to support photosynthesis. "Iron growth limitation" means the ratio of the growth rate of a species population in the environment (where there is a finite availability of iron) to the theoretical growth rate if there were no such limit on iron availability. - + 1 - In ocean modelling, diazotrophs are phytoplankton of the phylum cyanobacteria distinct from other phytoplankton groups in their ability to fix nitrogen gas in addition to nitrate and ammonium. Phytoplankton are algae that grow where there is sufficient light to support photosynthesis. "Iron growth limitation" means the ratio of the growth rate of a species population in the environment (where there is a finite availability of iron) to the theoretical growth rate if there were no such limit on iron availability. 
+ "Iron growth limitation" means the ratio of the growth rate of a biological population in the environment (where there is a finite availability of iron) to the theoretical growth rate if there were no such limit on iron availability. Phytoplankton are algae that grow where there is sufficient light to support photosynthesis. Diazotrophic phytoplankton are phytoplankton (predominantly from Phylum Cyanobacteria) that are able to fix molecular nitrogen (gas or solute) in addition to nitrate and ammonium. @@ -7097,7 +7620,7 @@ totals are summed to obtain the index. 1 clisccp - "X_area_fraction" means the fraction of horizontal area occupied by X. "X_area" means the horizontal area occupied by X within the grid cell. Cloud area fraction is also called "cloud amount" and "cloud cover". The cloud area fraction is for the whole atmosphere column, as seen from the surface or the top of the atmosphere. The cloud area fraction in a layer of the atmosphere has the standard name cloud_area_fraction_in_atmosphere_layer. The ISCCP cloud area fraction is diagnosed from atmosphere model output by the ISCCP simulator software in such a way as to be comparable with the observational diagnostics of ISCCP (the International Satellite Cloud Climatology Project). + "Area fraction" is the fraction of a grid cell's horizontal area that has some characteristic of interest. It is evaluated as the area of interest divided by the grid cell area. It may be expressed as a fraction, a percentage, or any other dimensionless representation of a fraction. The cloud area fraction is for the whole atmosphere column, as seen from the surface or the top of the atmosphere. For the cloud area fraction between specified levels in the atmosphere, standard names including "cloud_area_fraction_in_atmosphere_layer" are used. Standard names also exist for high, medium and low cloud types. The ISCCP cloud area fraction is diagnosed from atmosphere model output by the ISCCP simulator software in such a way as to be comparable with the observational diagnostics of ISCCP (the International Satellite Cloud Climatology Project). Cloud area fraction is also called "cloud amount" and "cloud cover". @@ -7135,6 +7658,13 @@ totals are summed to obtain the index. "shortwave" means shortwave radiation. Radiance is the radiative flux in a particular direction, per unit of solid angle. If radiation is isotropic, the radiance is independent of direction, so the direction should not be specified. If the radiation is directionally dependent, a standard name of upwelling or downwelling radiance should be chosen instead. + + 1 + + + The Keetch Byram Drought Index (KBDI) is a numerical drought index ranging from 0 to 800 that estimates the cumulative moisture deficiency in soil. It is a cumulative index. It is a function of maximum temperature and precipitation over the previous 24 hours. + + J m-2 @@ -7160,14 +7690,14 @@ totals are summed to obtain the index. s-1 38 - "tendency_of_X" means derivative of X with respect to time. The Lagrangian tendency of a quantity is its rate of change following the motion of the fluid, also called the "material derivative" or "convective derivative". The Lagrangian tendency of sigma plays the role of the upward component of air velocity when the atmosphere sigma coordinate (a dimensionless atmosphere vertical coordinate) is being used as the vertical coordinate. If the vertical air velocity is upwards, it is negative when expressed as a tendency of sigma; downwards is positive. 
See Appendix D of the CF convention for information about dimensionless vertical coordinates. + The phrase "tendency_of_X" means derivative of X with respect to time. The Lagrangian tendency of a quantity is its rate of change following the motion of the fluid, also called the "material derivative" or "convective derivative". The Lagrangian tendency of sigma plays the role of the upward component of air velocity when the atmosphere sigma coordinate (a dimensionless atmosphere vertical coordinate) is being used as the vertical coordinate. If the vertical air velocity is upwards, it is negative when expressed as a tendency of sigma; downwards is positive. See Appendix D of the CF convention for information about parametric vertical coordinates. 1 81 sftlf - "X_area_fraction" means the fraction of horizontal area occupied by X. "X_area" means the horizontal area occupied by X within the grid cell. + "Area fraction" is the fraction of a grid cell's horizontal area that has some characteristic of interest. It is evaluated as the area of interest divided by the grid cell area. It may be expressed as a fraction, a percentage, or any other dimensionless representation of a fraction. @@ -7181,14 +7711,14 @@ totals are summed to obtain the index. - A variable with the standard name of land_cover_sccs contains strings which indicate the nature of the surface, e.g. cropland_..., tree_... . These strings are standardised. Values must be combinations of classifiers from the Land Cover Classification System (LCCS; Di Gregorio A., 2005, UN Land Cover Classification System (LCCS) - Classification concepts and user manual for Software version 2; available at www.fao.org/DOCREP/003/X0596E/X0596e00.htm). + A variable with the standard name of land_cover_lccs contains strings which indicate the nature of the surface, e.g. cropland_..., tree_... . Each string should represent a land cover class constructed using the Land Cover Classification System (LCCS; Di Gregorio A., 2005, UN Land Cover Classification System (LCCS) - Classification concepts and user manual for Software version 2; available at www.fao.org/DOCREP/003/X0596E/X0596e00.htm). String values should represent the classifiers used to define each class. 1 sftgif - "X_area_fraction" means the fraction of horizontal area occupied by X. "Land ice" means glaciers, ice-caps and ice-sheets resting on bedrock and also includes ice-shelves. + "Area fraction" is the fraction of a grid cell's horizontal area that has some characteristic of interest. It is evaluated as the area of interest divided by the grid cell area. It may be expressed as a fraction, a percentage, or any other dimensionless representation of a fraction. "Land ice" means glaciers, ice-caps and ice-sheets resting on bedrock and also includes ice-shelves. @@ -7216,7 +7746,7 @@ totals are summed to obtain the index. K - "Land ice" means glaciers, ice-caps and ice-sheets resting on bedrock and also includes ice-shelves. The standard name land_ice_basal_temperature means the temperature of the land ice at its lower boundary. + "Land ice" means glaciers, ice-caps and ice-sheets resting on bedrock and also includes ice-shelves. The standard name land_ice_basal_temperature means the temperature of the land ice at its lower boundary. 
It is strongly recommended that a variable with this standard name should have a units_metadata attribute, with one of the values "on-scale" or "difference", whichever is appropriate for the data, because it is essential to know whether the temperature is on-scale (meaning relative to the origin of the scale indicated by the units) or refers to temperature differences (implying that the origin of the temperature scale is irrelevant), in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). @@ -7279,7 +7809,7 @@ totals are summed to obtain the index. kg - "Land ice not displacing sea water" means land ice that would not alter sea level if the ice were converted to water and added to the ocean. It excludes ice shelves (and any other sort of floating ice) and it excludes a fraction of grounded ice-sheet mass equivalent to the mass of any sea water it displaces. It includes glaciers and a portion of grounded ice-sheet mass exceeding the mass of any sea water displaced. The quantity with standard name land_ice_mass_not_displacing_sea_water is the total mass integrated over an area of land ice. The geographical extent of the ice over which the mass was calculated should be described by providing bounds on the horizontal coordinate variable or scalar with the standard name of "region" supplied according to section 6.1.1 of the CF convention. + "Land ice not displacing sea water" means land ice that would alter sea level if the ice were converted to water and added to the ocean. It excludes ice shelves (and any other sort of floating ice) and it excludes a fraction of grounded ice-sheet mass equivalent to the mass of any sea water it displaces. It includes glaciers and a portion of grounded ice-sheet mass exceeding the mass of any sea water displaced. The quantity with standard name land_ice_mass_not_displacing_sea_water is the total mass integrated over an area of land ice. The geographical extent of the ice over which the mass was calculated should be described by providing bounds on the horizontal coordinate variable or scalar with the standard name of "region" supplied according to section 6.1.1 of the CF convention. "Land ice not displacing sea water" is sometimes referred to as "ice above flotation" or "ice above floatation". @@ -7356,7 +7886,7 @@ totals are summed to obtain the index. K - "Land ice" means glaciers, ice-caps and ice-sheets resting on bedrock and also includes ice-shelves. + "Land ice" means glaciers, ice-caps and ice-sheets resting on bedrock and also includes ice-shelves. It is strongly recommended that a variable with this standard name should have a units_metadata attribute, with one of the values "on-scale" or "difference", whichever is appropriate for the data, because it is essential to know whether the temperature is on-scale (meaning relative to the origin of the scale indicated by the units) or refers to temperature differences (implying that the origin of the temperature scale is irrelevant), in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). @@ -7436,6 +7966,27 @@ totals are summed to obtain the index. "Content" indicates a quantity per unit area.
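The land_ice_mass_not_displacing_sea_water entry above asks for the geographical extent to be described via a coordinate with the standard name "region". A minimal Iris sketch; the region label and the mass value are assumptions for illustration:

```python
# Minimal sketch: a total integrated land-ice mass labelled with a
# scalar string-valued "region" coordinate, per CF section 6.1.1.
import numpy as np
import iris.coords
import iris.cube

cube = iris.cube.Cube(np.array(2.9e18), units="kg")  # assumed mass value
cube.rename("land_ice_mass_not_displacing_sea_water")
region = iris.coords.AuxCoord(["greenland"], standard_name="region")
cube.add_aux_coord(region)  # scalar coordinate: spans no data dimensions
```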
+ + 1 + + + Left singular vectors of the matrix representing the logarithmic scale remote sensing averaging kernels (Weber 2019; Schneider et al., 2022) of the methane mole fractions obtained by a remote sensing observation (fractional changes of methane in the retrieved atmosphere relative to the fractional changes of methane in the true atmosphere, Rodgers 2000; Keppens et al., 2015). + + + + 1 + + + Left singular vectors of the matrix representing the remote sensing averaging kernels (Weber 2019; Schneider et al., 2022) of the methane mole fractions obtained by a remote sensing observation (changes of methane in the retrieved atmosphere relative to the changes of methane in the true atmosphere, Rodgers 2000). + + + + J kg-1 + + + The lightning_potential_index measures the potential for charge generation and separation that leads to lightning flashes in convective thunderstorms. It is derived from the model simulated grid-scale updraft velocity and the mass mixing ratios of liquid water, cloud ice, snow, and graupel. + + J @@ -7447,14 +7998,14 @@ totals are summed to obtain the index. 1 - "X_area_fraction" means the fraction of horizontal area occupied by X. Cloud area fraction is also called "cloud amount" and "cloud cover". The cloud area fraction is for the whole atmosphere column, as seen from the surface or the top of the atmosphere. The cloud area fraction in a layer of the atmosphere has the standard name cloud_area_fraction_in_atmosphere_layer. The chemical formula for water is H2O. + "Area fraction" is the fraction of a grid cell's horizontal area that has some characteristic of interest. It is evaluated as the area of interest divided by the grid cell area. It may be expressed as a fraction, a percentage, or any other dimensionless representation of a fraction. The cloud area fraction is for the whole atmosphere column, as seen from the surface or the top of the atmosphere. For the cloud area fraction between specified levels in the atmosphere, standard names including "cloud_area_fraction_in_atmosphere_layer" are used. Standard names also exist for high, medium and low cloud types. Cloud area fraction is also called "cloud amount" and "cloud cover". 1 - "Layer" means any layer with upper and lower boundaries that have constant values in some vertical coordinate. There must be a vertical coordinate variable indicating the extent of the layer(s). If the layers are model layers, the vertical coordinate can be "model_level_number", but it is recommended to specify a physical coordinate (in a scalar or auxiliary coordinate variable) as well. "X_area_fraction" means the fraction of horizontal area occupied by X. Cloud area fraction is also called "cloud amount" and "cloud cover". The chemical formula for water is H2O. + "Area fraction" is the fraction of a grid cell's horizontal area that has some characteristic of interest. It is evaluated as the area of interest divided by the grid cell area. It may be expressed as a fraction, a percentage, or any other dimensionless representation of a fraction. "Layer" means any layer with upper and lower boundaries that have constant values in some vertical coordinate. There must be a vertical coordinate variable indicating the extent of the layer(s). If the layers are model layers, the vertical coordinate can be "model_level_number", but it is recommended to specify a physical coordinate (in a scalar or auxiliary coordinate variable) as well. Standard names also exist for high, medium and low cloud types.
Standard names referring only to "cloud_area_fraction" should be used for quantities for the whole atmosphere column. Cloud area fraction is also called "cloud amount" and "cloud cover". @@ -7475,14 +8026,14 @@ totals are summed to obtain the index. kg m-2 - "Content" indicates a quantity per unit area. The surface called "surface" means the lower boundary of the atmosphere. + "Content" indicates a quantity per unit area. Surface snow refers to the snow on the solid ground or on surface ice cover, but excludes, for example, falling snowflakes and snow on plants. kg m-2 s-1 - In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. The phrase "surface_snow" means snow lying on the surface. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. + In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. Surface snow refers to the snow on the solid ground or on surface ice cover, but excludes, for example, falling snowflakes and snow on plants. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. @@ -7513,6 +8064,27 @@ totals are summed to obtain the index. "Content" indicates a quantity per unit area. "Litter" is dead plant material in or above the soil. It is distinct from coarse wood debris. The precise distinction between "fine" and "coarse" is model dependent. The sum of the quantities with standard names surface_litter_mass_content_of_nitrogen and subsurface_litter_mass_content_of_nitrogen has the standard name litter_mass_content_of_nitrogen. + + 1 + + + A quality flag that reports the result of the Location test, which checks that a location is within reasonable bounds. The linkage between the data variable and this variable is achieved using the ancillary_variables attribute. There are standard names for other specific quality tests which take the form of X_quality_flag. Quality information that does not match any of the specific quantities should be given the more general standard name of quality_flag. + + + + m-3 + + + The aerosol particle number size distribution is the number concentration of aerosol particles, normalised to the decadal logarithmic size interval the concentration applies to, as a function of particle diameter. A coordinate variable with the standard name of electrical_mobility_particle_diameter, aerodynamic_particle_diameter, or optical_particle_diameter should be specified to indicate that the property applies at specific particle sizes selected by the indicated method. To specify the relative humidity at which the particle sizes were selected, provide a scalar coordinate variable with the standard name of relative_humidity_for_aerosol_particle_size_selection. "log10_X" means common logarithm (i.e. base 10) of X. "stp" means standard temperature (0 degC) and pressure (101325 Pa). + + + + m-3 + + + The aerosol particle number size distribution is the number concentration of aerosol particles, normalised to the decadal logarithmic size interval the concentration applies to, as a function of particle diameter. 
A coordinate variable with the standard name of electrical_mobility_particle_diameter, aerodynamic_particle_diameter, or optical_particle_diameter should be specified to indicate that the property applies at specific particle sizes selected by the indicated method. To specify the relative humidity at which the particle sizes were selected, provide a scalar coordinate variable with the standard name of relative_humidity_for_aerosol_particle_size_selection. "log10_X" means common logarithm (i.e. base 10) of X. + + m-3 @@ -7520,6 +8092,13 @@ totals are summed to obtain the index. The cloud condensation nuclei number size distribution is the number concentration of aerosol particles, normalised to the decadal logarithmic size interval the concentration applies to, as a function of particle diameter, where the particle acts as condensation nucleus for liquid-phase clouds. A coordinate variable with the standard name of relative_humidity should be specified to indicate that the property refers to a specific supersaturation with respect to liquid water. A coordinate variable with the standard name of electrical_mobility_particle_diameter should be specified to indicate that the property applies at specific mobility particle sizes. To specify the relative humidity at which the particle sizes were selected, provide a scalar coordinate variable with the standard name of relative_humidity_for_aerosol_particle_size_selection. The ability of a particle to act as a condensation nucleus is determined by its size, chemical composition, and morphology. "log10_X" means common logarithm (i.e. base 10) of X. "stp" means standard temperature (0 degC) and pressure (101325 Pa). + + m-3 + + + The cloud condensation nuclei number size distribution is the number concentration of aerosol particles, normalised to the decadal logarithmic size interval the concentration applies to, as a function of particle diameter, where the particle acts as condensation nucleus for liquid-phase clouds. A coordinate variable with the standard name of relative_humidity should be specified to indicate that the property refers to a specific supersaturation with respect to liquid water. A coordinate variable with the standard name of electrical_mobility_particle_diameter should be specified to indicate that the property applies at specific mobility particle sizes. To specify the relative humidity at which the particle sizes were selected, provide a scalar coordinate variable with the standard name of relative_humidity_for_aerosol_particle_size_selection. The ability of a particle to act as a condensation nucleus is determined by its size, chemical composition, and morphology. "log10_X" means common logarithm (i.e. base 10) of X. + + degree_east @@ -7531,7 +8110,7 @@ totals are summed to obtain the index. 1 - Low type clouds are: Stratus, Stratocumulus, Cumulus, Cumulonimbus. "X_area_fraction" means the fraction of horizontal area occupied by X. Cloud area fraction is also called "cloud amount" and "cloud cover". X_type_cloud_area_fraction is generally determined on the basis of cloud type, though Numerical Weather Prediction (NWP) models often calculate them based on the vertical location of the cloud. + "Area fraction" is the fraction of a grid cell's horizontal area that has some characteristic of interest. It is evaluated as the area of interest divided by the grid cell area. It may be expressed as a fraction, a percentage, or any other dimensionless representation of a fraction. 
Low type clouds are: Stratus, Stratocumulus, Cumulus, Cumulonimbus. X_type_cloud_area_fraction is generally determined on the basis of cloud type, though Numerical Weather Prediction (NWP) models often calculate them based on the vertical location of the cloud. For the cloud area fraction between specified levels in the atmosphere, standard names including "cloud_area_fraction_in_atmosphere_layer" are used. Standard names referring only to "cloud_area_fraction" should be used for quantities for the whole atmosphere column. Cloud area fraction is also called "cloud amount" and "cloud cover". @@ -7657,7 +8236,7 @@ totals are summed to obtain the index. m E141 - The surface called "surface" means the lower boundary of the atmosphere. "lwe" means liquid water equivalent. "Amount" means mass per unit area. The construction lwe_thickness_of_X_amount or _content means the vertical extent of a layer of liquid water having the same mass per unit area. Surface amount refers to the amount on the ground, excluding that on the plant or vegetation canopy. + The abbreviation "lwe" means liquid water equivalent. "Amount" means mass per unit area. The construction lwe_thickness_of_X_amount or _content means the vertical extent of a layer of liquid water having the same mass per unit area. Surface snow amount refers to the amount on the solid ground or on surface ice cover, but excludes, for example, falling snowflakes and snow on plants. @@ -7674,6 +8253,13 @@ totals are summed to obtain the index. "lwe" means liquid water equivalent. "Water" means water in all phases. Evaporation is the conversion of liquid or solid into vapor. (The conversion of solid alone into vapor is called "sublimation".) + + m s-1 + + + The quantity with standard name magnitude_of_air_velocity_relative_to_sea_water is the speed of the motion of the air relative to the near-surface current, usually derived from vectors. The components of the relative velocity vector have standard names eastward_air_velocity_relative_to_sea_water and northward_air_velocity_relative_to_sea_water. A vertical coordinate variable or scalar coordinate variable with standard name "depth" should be used to indicate the depth of sea water velocity used in the calculation. Similarly, a vertical coordinate variable or scalar coordinate with standard name "height" should be used to indicate the height of the wind component. + + m @@ -7713,70 +8299,133 @@ totals are summed to obtain the index. Pa - The surface called "surface" means the lower boundary of the atmosphere. "magnitude_of_X" means magnitude of a vector X. "Downward" indicates a vector component which is positive when directed downward (negative upward). + The phrase "magnitude_of_X" means magnitude of a vector X. The surface called "surface" means the lower boundary of the atmosphere. "Surface stress" means the shear stress (force per unit area) exerted by the wind at the surface. A downward stress is a downward flux of momentum. Over large bodies of water, wind stress can drive near-surface currents. "Downward" indicates a vector component which is positive when directed downward (negative upward). - + kg m-3 - Mass concentration means mass per unit volume and is used in the construction mass_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for acetic_acid is CH3COOH. The IUPAC name for acetic acid is ethanoic acid.
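The magnitude_of_air_velocity_relative_to_sea_water entry above is, as its definition says, usually derived from the two vector components. A small sketch with assumed sample values; the scalar height and depth record the levels at which each input applies:

```python
# Minimal sketch: speed of the air relative to the near-surface current,
# derived from its eastward and northward components (values assumed).
import numpy as np

u_rel = np.array([3.0, -1.5])   # eastward_air_velocity_relative_to_sea_water, m s-1
v_rel = np.array([4.0, 2.0])    # northward_air_velocity_relative_to_sea_water, m s-1
speed = np.hypot(u_rel, v_rel)  # the magnitude, m s-1
wind_height = 10.0              # m, scalar "height" coordinate for the wind
current_depth = 5.0             # m, scalar "depth" coordinate for the current
```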
+ "Mass concentration" means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula of 19’-butanoyloxyfucoxanthin is C46H64O8. The equivalent term in the NERC P01 Parameter Usage Vocabulary may be found at http://vocab.nerc.ac.uk/collection/P01/current/BUTAXXXX/1/. - + kg m-3 - Mass concentration means mass per unit volume and is used in the construction mass_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for aceto-nitrile is CH3CN. The IUPAC name for aceto-nitrile is ethanenitrile. + "Mass concentration" means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula of 19'-hexanoyloxyfucoxanthin is C48H68O8. The equivalent term in the NERC P01 Parameter Usage Vocabulary may be found at http://vocab.nerc.ac.uk/collection/P01/current/HEXAXXXX/2/. - + kg m-3 - Mass concentration means mass per unit volume and is used in the construction mass_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. Alkanes are saturated hydrocarbons, i.e. they do not contain any chemical double bonds. Alkanes contain only hydrogen and carbon combined in the general proportions C(n)H(2n+2); "alkanes" is the term used in standard names to describe the group of chemical species having this common structure that are represented within a given model. The list of individual species that are included in a quantity having a group chemical standard name can vary between models. Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute. Standard names exist for some individual alkane species, e.g., methane and ethane. + "Mass concentration" means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. Aerosol particles take up ambient water (a process known as hygroscopic growth) depending on the relative humidity and the composition of the particles. "Dry aerosol particles" means aerosol particles without any water uptake. The absorption equivalent black carbon mass concentration is obtained by conversion from the particle light absorption coefficient with a suitable mass absorption cross-section. Reference: Petzold, A., Ogren, J. A., Fiebig, M., Laj, P., Li, S.-M., Baltensperger, U., Holzer-Popp, T., Kinne, S., Pappalardo, G., Sugimoto, N., Wehrli, C., Wiedensohler, A., and Zhang, X.-Y.: Recommendations for reporting "black carbon" measurements, Atmos. Chem. 
Phys., 13, 8365–8379, https://doi.org/10.5194/acp-13-8365-2013, 2013. - + kg m-3 - Mass concentration means mass per unit volume and is used in the construction mass_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. Alkenes are unsaturated hydrocarbons as they contain chemical double bonds between adjacent carbon atoms. Alkenes contain only hydrogen and carbon combined in the general proportions C(n)H(2n); "alkenes" is the term used in standard names to describe the group of chemical species having this common structure that are represented within a given model. The list of individual species that are included in a quantity having a group chemical standard name can vary between models. Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute. Standard names exist for some individual alkene species, e.g., ethene and propene. + "Mass concentration" means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. Aerosol particles take up ambient water (a process known as hygroscopic growth) depending on the relative humidity and the composition of the particles. "Dry aerosol particles" means aerosol particles without any water uptake. "Pm10 aerosol" means atmospheric particulate compounds with an aerodynamic diameter of less than or equal to 10 micrometers. The absorption equivalent black carbon mass concentration is obtained by conversion from the particle light absorption coefficient with a suitable mass absorption cross-section. Reference: Petzold, A., Ogren, J. A., Fiebig, M., Laj, P., Li, S.-M., Baltensperger, U., Holzer-Popp, T., Kinne, S., Pappalardo, G., Sugimoto, N., Wehrli, C., Wiedensohler, A., and Zhang, X.-Y.: Recommendations for reporting "black carbon" measurements, Atmos. Chem. Phys., 13, 8365–8379, https://doi.org/10.5194/acp-13-8365-2013, 2013. - + kg m-3 - Mass concentration means mass per unit volume and is used in the construction mass_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for alpha_hexachlorocyclohexane is C6H6Cl6. + "Mass concentration" means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. Aerosol particles take up ambient water (a process known as hygroscopic growth) depending on the relative humidity and the composition of the particles. "Dry aerosol particles" means aerosol particles without any water uptake. "Pm1 aerosol" means atmospheric particulate compounds with an aerodynamic diameter of less than or equal to 1 micrometer. 
The absorption equivalent black carbon mass concentration is obtained by conversion from the particle light absorption coefficient with a suitable mass absorption cross-section. Reference: Petzold, A., Ogren, J. A., Fiebig, M., Laj, P., Li, S.-M., Baltensperger, U., Holzer-Popp, T., Kinne, S., Pappalardo, G., Sugimoto, N., Wehrli, C., Wiedensohler, A., and Zhang, X.-Y.: Recommendations for reporting "black carbon" measurements, Atmos. Chem. Phys., 13, 8365–8379, https://doi.org/10.5194/acp-13-8365-2013, 2013. - + kg m-3 - Mass concentration means mass per unit volume and is used in the construction mass_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for alpha_pinene is C10H16. The IUPAC name for alpha-pinene is (1S,5S)-2,6,6-trimethylbicyclo[3.1.1]hept-2-ene. + "Mass concentration" means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. Aerosol particles take up ambient water (a process known as hygroscopic growth) depending on the relative humidity and the composition of the particles. "Dry aerosol particles" means aerosol particles without any water uptake. "Pm2p5 aerosol" means atmospheric particulate compounds with an aerodynamic diameter of less than or equal to 2.5 micrometers. The absorption equivalent black carbon mass concentration is obtained by conversion from the particle light absorption coefficient with a suitable mass absorption cross-section. Reference: Petzold, A., Ogren, J. A., Fiebig, M., Laj, P., Li, S.-M., Baltensperger, U., Holzer-Popp, T., Kinne, S., Pappalardo, G., Sugimoto, N., Wehrli, C., Wiedensohler, A., and Zhang, X.-Y.: Recommendations for reporting "black carbon" measurements, Atmos. Chem. Phys., 13, 8365–8379, https://doi.org/10.5194/acp-13-8365-2013, 2013. - + kg m-3 - Mass concentration means mass per unit volume and is used in the construction mass_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for ammonia is NH3. + Mass concentration means mass per unit volume and is used in the construction mass_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for acetic_acid is CH3COOH. The IUPAC name for acetic acid is ethanoic acid. - + kg m-3 - Mass concentration means mass per unit volume and is used in the construction mass_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. 
Aerosol particles take up ambient water (a process known as hygroscopic growth) depending on the relative humidity and the composition of the particles. "Dry aerosol particles" means aerosol particles without any water uptake. The chemical formula for ammonium is NH4. + Mass concentration means mass per unit volume and is used in the construction mass_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for aceto-nitrile is CH3CN. The IUPAC name for aceto-nitrile is ethanenitrile. - + kg m-3 - Mass concentration means mass per unit volume and is used in the construction mass_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. "nmvoc" means non methane volatile organic compounds; "nmvoc" is the term used in standard names to describe the group of chemical species having this classification that are represented within a given model. The list of individual species that are included in a quantity having a group chemical standard name can vary between models. Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute. "Anthropogenic" means influenced, caused, or created by human activity. + "Mass concentration" means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The equivalent term in the NERC P01 Parameter Usage Vocabulary may be found at http://vocab.nerc.ac.uk/collection/P01/current/ATPXZZDZ/2/. + + + + kg m-3 + + + Mass concentration means mass per unit volume and is used in the construction mass_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. Alkanes are saturated hydrocarbons, i.e. they do not contain any chemical double bonds. Alkanes contain only hydrogen and carbon combined in the general proportions C(n)H(2n+2); "alkanes" is the term used in standard names to describe the group of chemical species having this common structure that are represented within a given model. The list of individual species that are included in a quantity having a group chemical standard name can vary between models. Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute. Standard names exist for some individual alkane species, e.g., methane and ethane. + + + + kg m-3 + + + Mass concentration means mass per unit volume and is used in the construction mass_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. 
Alkenes are unsaturated hydrocarbons as they contain chemical double bonds between adjacent carbon atoms. Alkenes contain only hydrogen and carbon combined in the general proportions C(n)H(2n); "alkenes" is the term used in standard names to describe the group of chemical species having this common structure that are represented within a given model. The list of individual species that are included in a quantity having a group chemical standard name can vary between models. Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute. Standard names exist for some individual alkene species, e.g., ethene and propene. + + + + kg m-3 + + + "Mass concentration" means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula of alpha-carotene is C40H56. The equivalent term in the NERC P01 Parameter Usage Vocabulary may be found at http://vocab.nerc.ac.uk/collection/P01/current/BECAXXP1/2/. + + + + kg m-3 + + + Mass concentration means mass per unit volume and is used in the construction mass_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for alpha_hexachlorocyclohexane is C6H6Cl6. + + + + kg m-3 + + + Mass concentration means mass per unit volume and is used in the construction mass_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for alpha_pinene is C10H16. The IUPAC name for alpha-pinene is (1S,5S)-2,6,6-trimethylbicyclo[3.1.1]hept-2-ene. + + + + kg m-3 + + + "Mass concentration" means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". Aluminium means aluminium in all chemical forms, commonly referred to as "total aluminium". "Sea floor sediment" is sediment deposited at the sea bed. + + + + kg m-3 + + + Mass concentration means mass per unit volume and is used in the construction mass_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for ammonia is NH3. + + + + kg m-3 + + + Mass concentration means mass per unit volume and is used in the construction mass_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. Aerosol particles take up ambient water (a process known as hygroscopic growth) depending on the relative humidity and the composition of the particles. "Dry aerosol particles" means aerosol particles without any water uptake. 
The chemical formula for ammonium is NH4. + + + + kg m-3 + + + Mass concentration means mass per unit volume and is used in the construction mass_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. "nmvoc" means non methane volatile organic compounds; "nmvoc" is the term used in standard names to describe the group of chemical species having this classification that are represented within a given model. The list of individual species that are included in a quantity having a group chemical standard name can vary between models. Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute. "Anthropogenic" means influenced, caused, or created by human activity. @@ -7786,6 +8435,13 @@ totals are summed to obtain the index. Mass concentration means mass per unit volume and is used in the construction mass_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. Aromatic compounds in organic chemistry are compounds that contain at least one benzene ring of six carbon atoms joined by alternating single and double covalent bonds. The simplest aromatic compound is benzene itself. In standard names "aromatic_compounds" is the term used to describe the group of aromatic chemical species that are represented within a given model. The list of individual species that are included in a quantity having a group chemical standard name can vary between models. Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute. Standard names exist for some individual aromatic species, e.g. benzene and xylene. + + kg m-3 + + + "Mass concentration" means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". Arsenic means arsenic in all chemical forms, commonly referred to as "total arsenic". "Sea floor sediment" is sediment deposited at the sea bed. + + kg m-3 @@ -7814,6 +8470,13 @@ totals are summed to obtain the index. Mass concentration means mass per unit volume and is used in the construction mass_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for benzene is C6H6. Benzene is the simplest aromatic hydrocarbon and has a ring structure consisting of six carbon atoms joined by alternating single and double chemical bonds. Each carbon atom is additionally bonded to one hydrogen atom. There are standard names that refer to aromatic_compounds as a group, as well as those for individual species. 
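The equivalent black carbon entries above define the quantity by conversion from the particle light absorption coefficient with a suitable mass absorption cross-section (MAC). A minimal worked sketch of that conversion follows; the absorption coefficient and MAC values are illustrative assumptions, not values from the table, and Petzold et al. (2013) discuss how an appropriate MAC should be chosen and reported.

```python
# Hedged sketch of the eBC conversion described in the entries above:
# mass concentration [kg m-3] = absorption coefficient [m-1] / MAC [m2 kg-1].
# Both input values are illustrative assumptions.
sigma_ap = 5.0e-6  # particle light absorption coefficient: 5 Mm-1, in SI [m-1]
mac = 7.5e3        # assumed mass absorption cross-section: 7.5 m2 g-1 [m2 kg-1]

rho_ebc = sigma_ap / mac  # equivalent black carbon mass concentration [kg m-3]
print(f"{rho_ebc:.2e} kg m-3")  # ~6.67e-10 kg m-3, i.e. ~0.67 ug m-3
```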
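For context, here is a minimal sketch of how one of these "mass_concentration_of_X_in_Y" names and its canonical units might be attached to a data variable with Iris, the library this table ships with. The choice of standard name (the ammonia entry above) and the data values are illustrative only.

```python
# Minimal sketch: an Iris cube carrying a "mass_concentration_of_X_in_Y"
# standard name with the table's canonical units. Values are illustrative.
import numpy as np
import iris

cube = iris.cube.Cube(
    np.array([1.2e-8, 1.5e-8, 1.1e-8]),  # illustrative concentrations
    standard_name="mass_concentration_of_ammonia_in_air",
    units="kg m-3",  # canonical units from the table
)
print(cube.standard_name, cube.units)
```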
+ + kg m-3 + + + "Mass concentration" means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula of beta-carotene is C40H56. The equivalent term in the NERC P01 Parameter Usage Vocabulary may be found at http://vocab.nerc.ac.uk/collection/P01/current/BBCAXXP1/2/. + + kg m-3 @@ -7828,11 +8491,32 @@ totals are summed to obtain the index. Mass concentration means mass per unit volume and is used in the construction mass_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. "nmvoc" means non methane volatile organic compounds; "nmvoc" is the term used in standard names to describe the group of chemical species having this classification that are represented within a given model. The list of individual species that are included in a quantity having a group chemical standard name can vary between models. Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute. "Biogenic" means influenced, caused, or created by natural processes. - + + kg m-3 + + + "Mass concentration" means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The phrase "expressed_as" is used in the construction "A_expressed_as_B", where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. Mass concentration of biota expressed as carbon is also referred to as "carbon biomass". "Biological taxon" is a name or other label identifying an organism or a group of organisms as belonging to a unit of classification in a hierarchical taxonomy. There must be an auxiliary coordinate variable with standard name biological_taxon_name to identify the taxon in human readable format and optionally an auxiliary coordinate variable with standard name biological_taxon_lsid to provide a machine-readable identifier. See Section 6.1.2 of the CF convention (version 1.8 or later) for information about biological taxon auxiliary coordinate variables. + + + + kg m-3 + + + "Mass concentration" means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The phrase "expressed_as" is used in the construction "A_expressed_as_B", where B is a chemical constituent of A. 
It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. "Biological taxon" is a name or other label identifying an organism or a group of organisms as belonging to a unit of classification in a hierarchical taxonomy. There must be an auxiliary coordinate variable with standard name biological_taxon_name to identify the taxon in human readable format and optionally an auxiliary coordinate variable with standard name biological_taxon_lsid to provide a machine-readable identifier. See Section 6.1.2 of the CF convention (version 1.8 or later) for information about biological taxon auxiliary coordinate variables. Chlorophylls are the green pigments found in most plants, algae and cyanobacteria; their presence is essential for photosynthesis to take place. There are several different forms of chlorophyll that occur naturally. All contain a chlorin ring (chemical formula C20H16N4) which gives the green pigment and a side chain whose structure varies. The naturally occurring forms of chlorophyll contain between 35 and 55 carbon atoms. + + + + kg m-3 + + + "Mass concentration" means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The phrase "expressed_as" is used in the construction "A_expressed_as_B", where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. Mass concentration of biota expressed as nitrogen is also referred to as "nitrogen biomass". "Biological taxon" is a name or other label identifying an organism or a group of organisms as belonging to a unit of classification in a hierarchical taxonomy. There must be an auxiliary coordinate variable with standard name biological_taxon_name to identify the taxon in human readable format and optionally an auxiliary coordinate variable with standard name biological_taxon_lsid to provide a machine-readable identifier. See Section 6.1.2 of the CF convention (version 1.8 or later) for information about biological taxon auxiliary coordinate variables. + + + kg m-3 - Mass concentration means mass per unit volume and is used in the construction mass_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. Aerosol particles take up ambient water (a process known as hygroscopic growth) depending on the relative humidity and the composition of the particles. "Dry aerosol particles" means aerosol particles without any water uptake. + "Mass concentration" means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. 
Aerosol particles take up ambient water (a process known as hygroscopic growth) depending on the relative humidity and the composition of the particles. "Dry aerosol particles" means aerosol particles without any water uptake. @@ -7860,7 +8544,7 @@ totals are summed to obtain the index. kg m-3 - Mass concentration means mass per unit volume and is used in the construction mass_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. "Brox" describes a family of chemical species consisting of inorganic bromine compounds with the exception of hydrogen bromide (HBr) and bromine nitrate (BrONO2). "Brox" is the term used in standard names for all species belonging to the family that are represented within a given model. The list of individual species that are included in a quantity with a group chemical standard name can vary between models. Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute. "Inorganic bromine", sometimes referred to as Bry, describes a family of chemical species which result from the degradation of source gases containing bromine (halons, methyl bromide, VSLS) and natural inorganic bromine sources such as volcanoes, seasalt and other aerosols. Standard names that use the term "inorganic_bromine" are used for quantities that contain all inorganic bromine species including HCl and ClONO2. The phrase 'expressed_as' is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. + "Mass concentration" means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". "Brox" describes a family of chemical species consisting of inorganic bromine compounds with the exception of hydrogen bromide (HBr) and bromine nitrate (BrONO2). The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. "Brox" is the term used in standard names for all species belonging to the family that are represented within a given model. The list of individual species that are included in a quantity with a group chemical standard name can vary between models. Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute. "Inorganic bromine", sometimes referred to as Bry, describes a family of chemical species which result from the degradation of source gases containing bromine (halons, methyl bromide, VSLS) and natural inorganic bromine sources such as volcanoes, sea salt and other aerosols. Standard names that use the term "inorganic_bromine" are used for quantities that contain all inorganic bromine species including HBr and BrONO2. @@ -7870,11 +8554,18 @@
Mass concentration means mass per unit volume and is used in the construction mass_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for butane is C4H10. Butane is a member of the group of hydrocarbons known as alkanes. There are standard names for the alkane group as well as for some of the individual species. + + kg m-3 + + + "Mass concentration" means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". Cadmium means cadmium in all chemical forms, commonly referred to as "total cadmium". "Sea floor sediment" is sediment deposited at the sea bed. + + kg m-3 - 'Mass concentration' means mass per unit volume and is used in the construction mass_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The phrase 'expressed_as' is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. Chlorophylls are the green pigments found in most plants, algae and cyanobacteria; their presence is essential for photosynthesis to take place. There are several different forms of chlorophyll that occur naturally. All contain a chlorin ring (chemical formula C20H16N4) which gives the green pigment and a side chain whose structure varies. The naturally occurring forms of chlorophyll contain between 35 and 55 carbon atoms. 'Calcareous phytoplankton' are phytoplankton that produce calcite. Calcite is a mineral that is a polymorph of calcium carbonate. The chemical formula of calcite is CaCO3. Phytoplankton are autotrophic prokaryotic or eukaryotic algae that live near the water surface where there is sufficient light to support photosynthesis. + Mass concentration means mass per unit volume and is used in the construction mass_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. Chlorophylls are the green pigments found in most plants, algae and cyanobacteria; their presence is essential for photosynthesis to take place. There are several different forms of chlorophyll that occur naturally. All contain a chlorin ring (chemical formula C20H16N4) which gives the green pigment and a side chain whose structure varies. The naturally occurring forms of chlorophyll contain between 35 and 55 carbon atoms. "Calcareous phytoplankton" are phytoplankton that produce calcite. Calcite is a mineral that is a polymorph of calcium carbonate. The chemical formula of calcite is CaCO3. 
Phytoplankton are algae that grow where there is sufficient light to support photosynthesis. @@ -7884,6 +8575,34 @@ totals are summed to obtain the index. Mass concentration means mass per unit volume and is used in the construction mass_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for carbon dioxide is CO2. + + kg m-3 + + + "Mass concentration" means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. Aerosol particles take up ambient water (a process known as hygroscopic growth) depending on the relative humidity and the composition of the particles. "Dry aerosol particles" means aerosol particles without any water uptake. Chemically, "carbon" is the total sum of elemental, organic, and inorganic carbon. In measurements of carbonaceous aerosols, inorganic carbon is neglected and its mass is assumed to be distributed between the elemental and organic carbon components of the aerosol particles. Reference: Petzold, A., Ogren, J. A., Fiebig, M., Laj, P., Li, S.-M., Baltensperger, U., Holzer-Popp, T., Kinne, S., Pappalardo, G., Sugimoto, N., Wehrli, C., Wiedensohler, A., and Zhang, X.-Y.: Recommendations for reporting "black carbon" measurements, Atmos. Chem. Phys., 13, 8365–8379, https://doi.org/10.5194/acp-13-8365-2013, 2013. + + + + kg m-3 + + + "Mass concentration" means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. Aerosol particles take up ambient water (a process known as hygroscopic growth) depending on the relative humidity and the composition of the particles. "Dry aerosol particles" means aerosol particles without any water uptake. "Pm10 aerosol" means atmospheric particulate compounds with an aerodynamic diameter of less than or equal to 10 micrometers. Chemically, "carbon" is the total sum of elemental, organic, and inorganic carbon. In measurements of carbonaceous aerosols, inorganic carbon is neglected and its mass is assumed to be distributed between the elemental and organic carbon components of the aerosol particles. Reference: Petzold, A., Ogren, J. A., Fiebig, M., Laj, P., Li, S.-M., Baltensperger, U., Holzer-Popp, T., Kinne, S., Pappalardo, G., Sugimoto, N., Wehrli, C., Wiedensohler, A., and Zhang, X.-Y.: Recommendations for reporting "black carbon" measurements, Atmos. Chem. Phys., 13, 8365–8379, https://doi.org/10.5194/acp-13-8365-2013, 2013. + + + + kg m-3 + + + "Mass concentration" means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". 
"Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. Aerosol particles take up ambient water (a process known as hygroscopic growth) depending on the relative humidity and the composition of the particles. "Dry aerosol particles" means aerosol particles without any water uptake. "Pm1 aerosol" means atmospheric particulate compounds with an aerodynamic diameter of less than or equal to 1 micrometer. Chemically, "carbon" is the total sum of elemental, organic, and inorganic carbon. In measurements of carbonaceous aerosols, inorganic carbon is neglected and its mass is assumed to be distributed between the elemental and organic carbon components of the aerosol particles. Reference: Petzold, A., Ogren, J. A., Fiebig, M., Laj, P., Li, S.-M., Baltensperger, U., Holzer-Popp, T., Kinne, S., Pappalardo, G., Sugimoto, N., Wehrli, C., Wiedensohler, A., and Zhang, X.-Y.: Recommendations for reporting "black carbon" measurements, Atmos. Chem. Phys., 13, 8365–8379, https://doi.org/10.5194/acp-13-8365-2013, 2013. + + + + kg m-3 + + + "Mass concentration" means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. Aerosol particles take up ambient water (a process known as hygroscopic growth) depending on the relative humidity and the composition of the particles. "Dry aerosol particles" means aerosol particles without any water uptake. "Pm2p5 aerosol" means atmospheric particulate compounds with an aerodynamic diameter of less than or equal to 2.5 micrometers. Chemically, "carbon" is the total sum of elemental, organic, and inorganic carbon. In measurements of carbonaceous aerosols, inorganic carbon is neglected and its mass is assumed to be distributed between the elemental and organic carbon components of the aerosol particles. Reference: Petzold, A., Ogren, J. A., Fiebig, M., Laj, P., Li, S.-M., Baltensperger, U., Holzer-Popp, T., Kinne, S., Pappalardo, G., Sugimoto, N., Wehrli, C., Wiedensohler, A., and Zhang, X.-Y.: Recommendations for reporting "black carbon" measurements, Atmos. Chem. Phys., 13, 8365–8379, https://doi.org/10.5194/acp-13-8365-2013, 2013. + + kg m-3 @@ -7895,49 +8614,56 @@ totals are summed to obtain the index. kg m-3 - Mass concentration means mass per unit volume and is used in the construction mass_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula of carbon tetrachloride is CCl4. + Mass concentration means mass per unit volume and is used in the construction mass_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula of carbon tetrachloride is CCl4. The IUPAC name for carbon tetrachloride is tetrachloromethane. + + + + kg m-3 + + + "Mass concentration" means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. 
A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". "Carotene" refers to the sum of all forms of the carotenoid pigment carotene. The equivalent term in the NERC P01 Parameter Usage Vocabulary may be found at http://vocab.nerc.ac.uk/collection/P01/current/CAROXXXX/1/. kg m-3 - Mass concentration means mass per unit volume and is used in the construction mass_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula of CFC113a CCl3CF3. The IUPAC name for CFC113a is 1,1,1-trichloro-2,2,2-trifluoro-ethane. + "Mass concentration" means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula of CFC113a is CCl3CF3. The IUPAC name for CFC113a is 1,1,1-trichloro-2,2,2-trifluoroethane. kg m-3 - Mass concentration means mass per unit volume and is used in the construction mass_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula of CFC113 is CCl2FCClF2. The IUPAC name for CFC113 is 1,1,2-trichloro-1,2,2-trifluoro-ethane. + "Mass concentration" means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula of CFC113 is CCl2FCClF2. The IUPAC name for CFC113 is 1,1,2-trichloro-1,2,2-trifluoroethane. kg m-3 - Mass concentration means mass per unit volume and is used in the construction mass_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula of CFC114 is CClF2CClF2. The IUPAC name for CFC114 is 1,2-dichloro-1,1,2,2-tetrafluoro-ethane. + "Mass concentration" means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula of CFC114 is CClF2CClF2. The IUPAC name for CFC114 is 1,2-dichloro-1,1,2,2-tetrafluoroethane. kg m-3 - Mass concentration means mass per unit volume and is used in the construction mass_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula of CFC115 is CClF2CF3. The IUPAC name for CFC115 is 1-chloro-1,1,2,2,2-pentafluoro-ethane. + "Mass concentration" means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". 
The chemical formula of CFC115 is CClF2CF3. The IUPAC name for CFC115 is 1-chloro-1,1,2,2,2-pentafluoroethane. kg m-3 - Mass concentration means mass per unit volume and is used in the construction mass_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula of CFC11 is CFCl3. The IUPAC name fof CFC11 is trichloro-fluoro-methane. + "Mass concentration" means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula of CFC11 is CFCl3. The IUPAC name for CFC11 is trichloro(fluoro)methane. kg m-3 - Mass concentration means mass per unit volume and is used in the construction mass_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for CFC12 is CF2Cl2. The IUPAC name for CFC12 is dichloro-difluoro-methane. + "Mass concentration" means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for CFC12 is CF2Cl2. The IUPAC name for CFC12 is dichloro(difluoro)methane. @@ -7961,6 +8687,20 @@ totals are summed to obtain the index. Mass concentration means mass per unit volume and is used in the construction mass_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for chlorine nitrate is ClONO2. + + kg m-3 + + + "Mass concentration" means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". Chlorophylls are the green pigments found in most plants, algae and cyanobacteria; their presence is essential for photosynthesis to take place. There are several different forms of chlorophyll that occur naturally. All contain a chlorin ring (chemical formula C20H16N4) which gives the green pigment and a side chain whose structure varies. The naturally occurring forms of chlorophyll contain between 35 and 55 carbon atoms. Chlorophyll-a is the most commonly occurring form of natural chlorophyll. The chemical formula of chlorophyll-a is C55H72O5N4Mg. "Sea floor sediment" is sediment deposited at the sea bed. + + + + kg m-3 + + + "Mass concentration" means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". Chlorophylls are the green pigments found in most plants, algae and cyanobacteria; their presence is essential for photosynthesis to take place. 
There are several different forms of chlorophyll that occur naturally. All contain a chlorin ring (chemical formula C20H16N4) which gives the green pigment and a side chain whose structure varies. The naturally occurring forms of chlorophyll contain between 35 and 55 carbon atoms. Chlorophyll-a is the most commonly occurring form of natural chlorophyll. The chemical formula of chlorophyll-a is C55H72O5N4Mg. "Sea ice" means all ice floating in the sea which has formed from freezing sea water, rather than by other processes such as calving of land ice to form icebergs. + + kg m-3 @@ -7968,26 +8708,74 @@ totals are summed to obtain the index. 'Mass concentration' means mass per unit volume and is used in the construction mass_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. Chlorophylls are the green pigments found in most plants, algae and cyanobacteria; their presence is essential for photosynthesis to take place. There are several different forms of chlorophyll that occur naturally. All contain a chlorin ring (chemical formula C20H16N4) which gives the green pigment and a side chain whose structure varies. The naturally occurring forms of chlorophyll contain between 35 and 55 carbon atoms. Chlorophyll-a is the most commonly occurring form of natural chlorophyll. The chemical formula of chlorophyll-a is C55H72O5N4Mg. + + kg m-3 + + + "Mass concentration" means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". Chlorophylls are the green pigments found in most plants, algae and cyanobacteria; their presence is essential for photosynthesis to take place. There are several different forms of chlorophyll that occur naturally. All contain a chlorin ring (chemical formula C20H16N4) which gives the green pigment and a side chain whose structure varies. The naturally occurring forms of chlorophyll contain between 35 and 55 carbon atoms. The equivalent term in the NERC P01 Parameter Usage Vocabulary may be found at http://vocab.nerc.ac.uk/collection/P01/current/CHLBXXPX/2/. + + + + kg m-3 + + + "Mass concentration" means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". Chlorophylls are the green pigments found in most plants, algae and cyanobacteria; their presence is essential for photosynthesis to take place. There are several different forms of chlorophyll that occur naturally. All contain a chlorin ring (chemical formula C20H16N4) which gives the green pigment and a side chain whose structure varies. The naturally occurring forms of chlorophyll contain between 35 and 55 carbon atoms. Chlorophyll c1c2 (sometimes written c1-c2 or c1+c2) means the sum of chlorophyll c1 and chlorophyll c2. The chemical formula of chlorophyll c1 is C35H30MgN4O5, and chlorophyll c2 is C35H28MgN4O5. The equivalent term in the NERC P01 Parameter Usage Vocabulary may be found at http://vocab.nerc.ac.uk/collection/P01/current/CHLC12PX/3/. 
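Several of the biomass and chlorophyll entries above require an auxiliary coordinate with standard name biological_taxon_name (and optionally biological_taxon_lsid), per Section 6.1.2 of CF 1.8. A hedged Iris sketch of that convention follows; the standard name, taxon, and value are placeholders (the exact entry ids are not shown in these hunks), and it assumes a standard-name table that already includes the taxon names introduced by this update.

```python
# Hedged sketch of the biological taxon convention described above.
# All names and values are illustrative placeholders.
import numpy as np
import iris
from iris.coords import AuxCoord

cube = iris.cube.Cube(
    np.array([2.0e-7]),  # illustrative chlorophyll concentration [kg m-3]
    standard_name="mass_concentration_of_chlorophyll_a_in_sea_water",
    units="kg m-3",
)
# Human-readable taxon label, as the descriptions require (CF 1.8, 6.1.2);
# assumes biological_taxon_name is present in the standard-name table.
taxon = AuxCoord(["Emiliania huxleyi"], standard_name="biological_taxon_name")
cube.add_aux_coord(taxon, 0)
print(cube)
```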
+ + + + kg m-3 + + + "Mass concentration" means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". Chlorophylls are the green pigments found in most plants, algae and cyanobacteria; their presence is essential for photosynthesis to take place. There are several different forms of chlorophyll that occur naturally. All contain a chlorin ring (chemical formula C20H16N4) which gives the green pigment and a side chain whose structure varies. The naturally occurring forms of chlorophyll contain between 35 and 55 carbon atoms. The chemical formula of chlorophyll c3 is C36H44MgN4O7. The equivalent term in the NERC P01 Parameter Usage Vocabulary may be found at http://vocab.nerc.ac.uk/collection/P01/current/CHLC03PX/2/. + + + + kg m-3 + + + "Mass concentration" means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". Chlorophylls are the green pigments found in most plants, algae and cyanobacteria; their presence is essential for photosynthesis to take place. There are several different forms of chlorophyll that occur naturally. All contain a chlorin ring (chemical formula C20H16N4) which gives the green pigment and a side chain whose structure varies. The naturally occurring forms of chlorophyll contain between 35 and 55 carbon atoms. + + + + kg m-3 + + + "Mass concentration" means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". Chlorophylls are the green pigments found in most plants, algae and cyanobacteria; their presence is essential for photosynthesis to take place. There are several different forms of chlorophyll that occur naturally. All contain a chlorin ring (chemical formula C20H16N4) which gives the green pigment and a side chain whose structure varies. The naturally occurring forms of chlorophyll contain between 35 and 55 carbon atoms. Chlorophyll-c means chlorophyll c1+c2+c3. The chemical formula of chlorophyll c1 is C35H30MgN4O5, and chlorophyll c2 is C35H28MgN4O5. The chemical formula of chlorophyll c3 is C36H44MgN4O7. + + + + kg m-3 + + + "Mass concentration" means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula of chlorophyllide-a is C35H34MgN4O5. + + kg m-3 - Mass concentration means mass per unit volume and is used in the construction mass_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. Chlorophylls are the green pigments found in most plants, algae and cyanobacteria; their presence is essential for photosynthesis to take place. There are several different forms of chlorophyll that occur naturally. 
All contain a chlorin ring (chemical formula C20H16N4) which gives the green pigment and a side chain whose structure varies. The naturally occurring forms of chlorophyll contain between 35 and 55 carbon atoms. - + Mass concentration means mass per unit volume and is used in the construction mass_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. Chlorophylls are the green pigments found in most plants, algae and cyanobacteria; their presence is essential for photosynthesis to take place. There are several different forms of chlorophyll that occur naturally. All contain a chlorin ring (chemical formula C20H16N4) which gives the green pigment and a side chain whose structure varies. The naturally occurring forms of chlorophyll contain between 35 and 55 carbon atoms. + + + + kg m-3 + + + "Mass concentration" means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". Chromium means chromium in all chemical forms, commonly referred to as "total chromium". "Sea floor sediment" is sediment deposited at the sea bed. kg m-3 - Mass concentration means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". Cloud droplets are spherical and typically a few micrometers to a few tens of micrometers in diameter. An upper limit of 0.2 mm diameter is sometimes used to distinguish between cloud droplets and drizzle drops, but in active cumulus clouds strong updrafts can maintain much larger cloud droplets. + "Mass concentration" means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". "Cloud liquid water" refers to the liquid phase of cloud water. A diameter of 0.2 mm has been suggested as an upper limit to the size of drops that shall be regarded as cloud drops; larger drops fall rapidly enough so that only very strong updrafts can sustain them. Any such division is somewhat arbitrary, and active cumulus clouds sometimes contain cloud drops much larger than this. Reference: AMS Glossary http://glossary.ametsoc.org/wiki/Cloud_drop. kg m-3 - Mass concentration means mass per unit volume and is used in the construction mass_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. "Clox" describes a family of chemical species consisting of inorganic chlorine compounds with the exception of hydrogen chloride (HCl) and chlorine nitrate (ClONO2). "Clox" is the term used in standard names for all species belonging to the family that are represented within a given model. The list of individual species that are included in a quantity with a group chemical standard name can vary between models. 
Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute. "Inorganic chlorine", sometimes referred to as Cly, describes a family of chemical species which result from the degradation of source gases containing chlorine (CFCs, HCFCs, VSLS) and natural inorganic chlorine sources such as seasalt and other aerosols. Standard names that use the term "inorganic_chlorine" are used for quantities that contain all inorganic chlorine species including HCl and ClONO2. The phrase 'expressed_as' is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. + "Mass concentration" means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". "Clox" describes a family of chemical species consisting of inorganic chlorine compounds with the exception of hydrogen chloride (HCl) and chlorine nitrate (ClONO2). The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. "Clox" is the term used in standard names for all species belonging to the family that are represented within a given model. The list of individual species that are included in a quantity with a group chemical standard name can vary between models. Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute. "Inorganic chlorine", sometimes referred to as Cly, describes a family of chemical species which result from the degradation of source gases containing chlorine (CFCs, HCFCs, VSLS) and natural inorganic chlorine sources such as sea salt and other aerosols. Standard names that use the term "inorganic_chlorine" are used for quantities that contain all inorganic chlorine species including HCl and ClONO2. @@ -7997,6 +8785,13 @@ totals are summed to obtain the index. Mass concentration means mass per unit volume and is used in the construction mass_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. "Ambient_aerosol" means that the aerosol is measured or modelled at the ambient state of pressure, temperature and relative humidity that exists in its immediate environment. "Ambient aerosol particles" are aerosol particles that have taken up ambient water through hygroscopic growth. The extent of hygroscopic growth depends on the relative humidity and the composition of the particles. Coarse mode aerosol particles have a diameter of more than 1 micrometer. + + kg m-3 + + + "Mass concentration" means mass per unit volume and is used in the construction mass_concentration_of_X_in_Y, where X is a material constituent of Y. 
A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". Cobalt means cobalt in all chemical forms, commonly referred to as "total cobalt". "Sea floor sediment" is sediment deposited at the sea bed. + + kg m-3 @@ -8004,32 +8799,53 @@ totals are summed to obtain the index. Mass concentration means mass per unit volume and is used in the construction mass_concentration_of_X_in_Y, where X is a material constituent of Y. Condensed water means liquid and ice. + + kg m-3 + + + "Mass concentration" means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". Copper means copper in all chemical forms, commonly referred to as "total copper". "Sea floor sediment" is sediment deposited at the sea bed. + + + + kg m-3 + + + "Mass concentration" means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula of diadinoxanthin is C40H54O3. The equivalent term in the NERC P01 Parameter Usage Vocabulary may be found at http://vocab.nerc.ac.uk/collection/P01/current/DIADXXXX/2/. + + kg m-3 - Mass concentration means mass per unit volume and is used in the construction mass_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The phrase 'expressed_as' is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. Diatoms are single-celled phytoplankton with an external skeleton made of silica. Phytoplankton are autotrophic prokaryotic or eukaryotic algae that live near the water surface where there is sufficient light to support photosynthesis. + Mass concentration means mass per unit volume and is used in the construction mass_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. Diatoms are single-celled phytoplankton with an external skeleton made of silica. Phytoplankton are algae that grow where there is sufficient light to support photosynthesis. kg m-3 - 'Mass concentration' means mass per unit volume and is used in the construction mass_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The phrase 'expressed_as' is used in the construction A_expressed_as_B, where B is a chemical constituent of A. 
It means that the quantity indicated by the standard name is calculated solely with respect to the B containedin A, neglecting all other chemical constituents of A. Chlorophylls are the green pigments found in most plants, algae and cyanobacteria; their presence is essential for photosynthesis to take place. There are several different forms of chlorophyll that occur naturally. All contain a chlorin ring (chemical formula C20H16N4) which gives the green pigment and a side chain whose structure varies. The naturally occurring forms of chlorophyll contain between 35 and 55 carbon atoms. Diatoms are single-celled phytoplankton with an external skeleton made of silica. Phytoplankton are autotrophic prokaryotic or eukaryotic algae that live near the water surface where there is sufficient light to support photosynthesis. + Mass concentration means mass per unit volume and is used in the construction mass_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. Chlorophylls are the green pigments found in most plants, algae and cyanobacteria; their presence is essential for photosynthesis to take place. There are several different forms of chlorophyll that occur naturally. All contain a chlorin ring (chemical formula C20H16N4) which gives the green pigment and a side chain whose structure varies. The naturally occurring forms of chlorophyll contain between 35 and 55 carbon atoms. Diatoms are single-celled phytoplankton with an external skeleton made of silica. Phytoplankton are algae that grow where there is sufficient light to support photosynthesis. kg m-3 - Mass concentration means mass per unit volume and is used in the construction mass_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The phrase 'expressed_as' is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. Diatoms are single-celled phytoplankton with an external skeleton made of silica. Phytoplankton are autotrophic prokaryotic or eukaryotic algae that live near the water surface where there is sufficient light to support photosynthesis. + Mass concentration means mass per unit volume and is used in the construction mass_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. Diatoms are single-celled phytoplankton with an external skeleton made of silica. 
Phytoplankton are algae that grow where there is sufficient light to support photosynthesis. + + + + kg m-3 + + + "Mass concentration" means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. Phytoplankton are algae that grow where there is sufficient light to support photosynthesis. Diazotrophic phytoplankton are phytoplankton (predominantly from Phylum Cyanobacteria) that are able to fix molecular nitrogen (gas or solute) in addition to nitrate and ammonium. - + kg m-3 - 'Mass concentration' means mass per unit volume and is used in the construction mass_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The phrase 'expressed_as' is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. Chlorophylls are the green pigments found in most plants, algae and cyanobacteria; their presence is essential for photosynthesis to take place. There are several different forms of chlorophyll that occur naturally. All contain a chlorin ring (chemical formula C20H16N4) which gives the green pigment and a side chain whose structure varies. The naturally occurring forms of chlorophyll contain between 35 and 55 carbon atoms. In ocean modelling, diazotrophs are phytoplankton of the phylum cyanobacteria distinct from other phytoplankton groups in their ability to fix nitrogen gas in addition to nitrate and ammonium. Phytoplankton are autotrophic prokaryotic or eukaryotic organisms that live near the water surface where there is sufficient light to support photosynthesis. + "Mass concentration" means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. Chlorophylls are the green pigments found in most plants, algae and cyanobacteria; their presence is essential for photosynthesis to take place. There are several different forms of chlorophyll that occur naturally. All contain a chlorin ring (chemical formula C20H16N4) which gives the green pigment and a side chain whose structure varies. The naturally occurring forms of chlorophyll contain between 35 and 55 carbon atoms. Phytoplankton are algae that grow where there is sufficient light to support photosynthesis. 
Diazotrophic phytoplankton are phytoplankton (predominantly from Phylum Cyanobacteria) that are able to fix molecular nitrogen (gas or solute) in addition to nitrate and ammonium. @@ -8053,6 +8869,13 @@ totals are summed to obtain the index. Mass concentration means mass per unit volume and is used in the construction mass_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for dinitrogen pentoxide is N2O5. + + kg m-3 + + + "Mass concentration" means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". + + kg m-3 @@ -8074,6 +8897,34 @@ totals are summed to obtain the index. Mass concentration means mass per unit volume and is used in the construction mass_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. Aerosol takes up ambient water (a process known as hygroscopic growth) depending on the relative humidity and the composition of the aerosol. "Dry aerosol particles" means aerosol particles without any water uptake. Chemically, "elemental carbon" is the carbonaceous fraction of particulate matter that is thermally stable in an inert atmosphere to high temperatures near 4000K and can only be gasified by oxidation starting at temperatures above 340 C. It is assumed to be inert and non-volatile under atmospheric conditions and insoluble in any solvent (Ogren and Charlson, 1983). + + kg m-3 + + + "Mass concentration" means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. Aerosol particles take up ambient water (a process known as hygroscopic growth) depending on the relative humidity and the composition of the particles. "Dry aerosol particles" means aerosol particles without any water uptake. Chemically, "elemental carbon" is the carbonaceous fraction of particulate matter that is thermally stable in an inert atmosphere to high temperatures near 4000K and can only be gasified by oxidation starting at temperatures above 340 C. It is assumed to be inert and non-volatile under atmospheric conditions and insoluble in any solvent (Ogren and Charlson, 1983). In measurements of carbonaceous aerosols, elemental carbon samples may also include some inorganic carbon compounds, whose mass is neglected and assumed to be distributed between the elemental and organic carbon components of the aerosol particles. Reference: Petzold, A., Ogren, J. A., Fiebig, M., Laj, P., Li, S.-M., Baltensperger, U., Holzer-Popp, T., Kinne, S., Pappalardo, G., Sugimoto, N., Wehrli, C., Wiedensohler, A., and Zhang, X.-Y.: Recommendations for reporting "black carbon" measurements, Atmos. Chem. 
Phys., 13, 8365–8379, https://doi.org/10.5194/acp-13-8365-2013, 2013. + + + + kg m-3 + + + "Mass concentration" means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. Aerosol particles take up ambient water (a process known as hygroscopic growth) depending on the relative humidity and the composition of the particles. "Dry aerosol particles" means aerosol particles without any water uptake. "Pm10 aerosol" means atmospheric particulate compounds with an aerodynamic diameter of less than or equal to 10 micrometers. Chemically, "elemental carbon" is the carbonaceous fraction of particulate matter that is thermally stable in an inert atmosphere to high temperatures near 4000K and can only be gasified by oxidation starting at temperatures above 340 C. It is assumed to be inert and non-volatile under atmospheric conditions and insoluble in any solvent (Ogren and Charlson, 1983). In measurements of carbonaceous aerosols, elemental carbon samples may also include some inorganic carbon compounds, whose mass is neglected and assumed to be distributed between the elemental and organic carbon components of the aerosol particles. Reference: Petzold, A., Ogren, J. A., Fiebig, M., Laj, P., Li, S.-M., Baltensperger, U., Holzer-Popp, T., Kinne, S., Pappalardo, G., Sugimoto, N., Wehrli, C., Wiedensohler, A., and Zhang, X.-Y.: Recommendations for reporting "black carbon" measurements, Atmos. Chem. Phys., 13, 8365–8379, https://doi.org/10.5194/acp-13-8365-2013, 2013. + + + + kg m-3 + + + "Mass concentration" means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. Aerosol particles take up ambient water (a process known as hygroscopic growth) depending on the relative humidity and the composition of the particles. "Dry aerosol particles" means aerosol particles without any water uptake. "Pm1 aerosol" means atmospheric particulate compounds with an aerodynamic diameter of less than or equal to 1 micrometer. Chemically, "elemental carbon" is the carbonaceous fraction of particulate matter that is thermally stable in an inert atmosphere to high temperatures near 4000K and can only be gasified by oxidation starting at temperatures above 340 C. It is assumed to be inert and non-volatile under atmospheric conditions and insoluble in any solvent (Ogren and Charlson, 1983). In measurements of carbonaceous aerosols, elemental carbon samples may also include some inorganic carbon compounds, whose mass is neglected and assumed to be distributed between the elemental and organic carbon components of the aerosol particles. Reference: Petzold, A., Ogren, J. A., Fiebig, M., Laj, P., Li, S.-M., Baltensperger, U., Holzer-Popp, T., Kinne, S., Pappalardo, G., Sugimoto, N., Wehrli, C., Wiedensohler, A., and Zhang, X.-Y.: Recommendations for reporting "black carbon" measurements, Atmos. Chem. 
Phys., 13, 8365–8379, https://doi.org/10.5194/acp-13-8365-2013, 2013. + + + + kg m-3 + + + "Mass concentration" means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. Aerosol particles take up ambient water (a process known as hygroscopic growth) depending on the relative humidity and the composition of the particles. "Dry aerosol particles" means aerosol particles without any water uptake. "Pm2p5 aerosol" means atmospheric particulate compounds with an aerodynamic diameter of less than or equal to 2.5 micrometers. Chemically, "elemental carbon" is the carbonaceous fraction of particulate matter that is thermally stable in an inert atmosphere to high temperatures near 4000K and can only be gasified by oxidation starting at temperatures above 340 C. It is assumed to be inert and non-volatile under atmospheric conditions and insoluble in any solvent (Ogren and Charlson, 1983). In measurements of carbonaceous aerosols, elemental carbon samples may also include some inorganic carbon compounds, whose mass is neglected and assumed to be distributed between the elemental and organic carbon components of the aerosol particles. Reference: Petzold, A., Ogren, J. A., Fiebig, M., Laj, P., Li, S.-M., Baltensperger, U., Holzer-Popp, T., Kinne, S., Pappalardo, G., Sugimoto, N., Wehrli, C., Wiedensohler, A., and Zhang, X.-Y.: Recommendations for reporting "black carbon" measurements, Atmos. Chem. Phys., 13, 8365–8379, https://doi.org/10.5194/acp-13-8365-2013, 2013. + + kg m-3 @@ -8106,14 +8957,14 @@ totals are summed to obtain the index. kg m-3 - 'Mass concentration' means mass per unit volume and is used in the construction mass_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The phrase 'expressed_as' is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. 'Flagellates' are a class of single celled organisms that use a flagellum (whip-like structure) for feeding and locomotion. Some flagellates can photosynthesize and others feed on bacteria, with a few flagellatescapable of both. + "Mass concentration" means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. "Flagellates" are a class of single celled organisms that use a flagellum (whip-like structure) for feeding and locomotion. Some flagellates can photosynthesize and others feed on bacteria, with a few flagellates capable of both. 
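Every entry in these hunks is built from the same mass_concentration_of_X_in_Y naming construction described in the definitions above. As a minimal, illustrative sketch (not part of this changeset), such names can be enumerated from Iris's auto-generated std_names module once a table revision containing these entries has been built in; STD_NAMES is the mapping that module actually exposes, while the filter below is only an example:

# Sketch: enumerate standard names using the "mass_concentration_of_X_in_Y"
# construction, assuming an installed Iris whose std_names module was
# regenerated from a table revision that includes these entries.
from iris.std_names import STD_NAMES

mass_concentrations = sorted(
    name
    for name in STD_NAMES
    if name.startswith("mass_concentration_of_") and "_in_" in name
)
for name in mass_concentrations[:5]:
    # Each such entry should report canonical units of "kg m-3".
    print(name, STD_NAMES[name]["canonical_units"])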
kg m-3 - 'Mass concentration' means mass per unit volume and is used in the construction mass_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The phrase 'expressed_as' is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. 'Flagellates' are a class of single celled organisms that use a flagellum (whip-like structure) for feeding and locomotion. Some flagellates can photosynthesize and others feed on bacteria, with a few flagellatescapable of both. + "Mass concentration" means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. "Flagellates" are a class of single celled organisms that use a flagellum (whip-like structure) for feeding and locomotion. Some flagellates can photosynthesize and others feed on bacteria, with a few flagellates capable of both. @@ -8130,6 +8981,13 @@ totals are summed to obtain the index. Mass concentration means mass per unit volume and is used in the construction mass_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for formic acid is HCOOH. The IUPAC name for formic acid is methanoic acid. + + kg m-3 + + + "Mass concentration" means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula of fucoxanthin is C42H58O6. The equivalent term in the NERC P01 Parameter Usage Vocabulary may be found at http://vocab.nerc.ac.uk/collection/P01/current/FUCXZZZZ/2/. + + kg m-3 @@ -8148,35 +9006,35 @@ totals are summed to obtain the index. kg m-3 - Mass concentration means mass per unit volume and is used in the construction mass_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for halon1202 is CBr2F2. The IUPAC name for halon1202 is dibromo-difluoro-methane. + "Mass concentration" means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for Halon1202 is CBr2F2. The IUPAC name for Halon1202 is dibromo(difluoro)methane. 
kg m-3 - Mass concentration means mass per unit volume and is used in the construction mass_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for halon1211 is CBrClF2. The IUPAC name for halon1211 is bromo-chloro-difluoro-methane. + "Mass concentration" means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for Halon1211 is CBrClF2. The IUPAC name for Halon1211 is bromo-chloro-difluoromethane. kg m-3 - Mass concentration means mass per unit volume and is used in the construction mass_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for halon1301 is CBrF3. The IUPAC name for halon1301 is bromo-trifluoro-methane. + "Mass concentration" means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for Halon1301 is CBrF3. The IUPAC name for Halon1301 is bromo(trifluoro)methane. kg m-3 - Mass concentration means mass per unit volume and is used in the construction mass_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for halo2402 is C2Br2F4. The IUPAC name for halon2402 is 1,2-dibromo-1,1,2,2-tetrafluoro-ethane. + "Mass concentration" means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for Halon2402 is C2Br2F4. The IUPAC name for Halon2402 is 1,2-dibromo-1,1,2,2-tetrafluoroethane. kg m-3 - Mass concentration means mass per unit volume and is used in the construction mass_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for hcc140a is CH3CCl3. The IUPAC name for hcc140a is 1,1,1-trichloro-ethane. + "Mass concentration" means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for HCC140a, also called methyl chloroform, is CH3CCl3. The IUPAC name for HCC140a is 1,1,1-trichloroethane. @@ -8197,7 +9055,7 @@ totals are summed to obtain the index. kg m-3 - Mass concentration means mass per unit volume and is used in the construction mass_concentration_of_X_in_Y, where X is a material constituent of Y. 
A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for HCFC22 is CHClF2. The IUPAC name for HCFC22 is chloro-difluoro-methane. + "Mass concentration" means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for HCFC22 is CHClF2. The IUPAC name for HCFC22 is chloro(difluoro)methane. @@ -8246,14 +9104,14 @@ totals are summed to obtain the index. kg m-3 - Mass concentration means mass per unit volume and is used in the construction mass_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for the hydroperoxyl radical is HO2. In chemistry, a 'radical' is a highly reactive, and therefore shortlived, species. + "Mass concentration" means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for the hydroperoxyl radical is HO2. In chemistry, a "radical" is a highly reactive, and therefore short lived, species. kg m-3 - Mass concentration means mass per unit volume and is used in the construction mass_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for the hydroxyl radical is OH. In chemistry, a 'radical' is a highly reactive, and therefore shortlived, species. + "Mass concentration" means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for the hydroxyl radical is OH. In chemistry, a "radical" is a highly reactive, and therefore short lived, species. @@ -8274,14 +9132,14 @@ totals are summed to obtain the index. kg m-3 - Mass concentration means mass per unit volume and is used in the construction mass_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. "Inorganic bromine", sometimes referred to as Bry, describes a family of chemical species which result from the degradation of source gases containing bromine (halons, methyl bromide, VSLS) and natural inorganic bromine sources such as volcanoes, seasalt and other aerosols. "Inorganic bromine" is the term used in standard names for all species belonging to the family that are represented within a given model. The list of individual species that are included in a quantity having a group chemical standard name can vary between models. Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute. 
Standard names that use the term "brox" are used for quantities that contain all inorganic bromine species except HBr and BrONO2. + "Mass concentration" means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". "Inorganic bromine", sometimes referred to as Bry, describes a family of chemical species which result from the degradation of source gases containing bromine (halons, methyl bromide, VSLS) and natural inorganic bromine sources such as volcanoes, sea salt and other aerosols. "Inorganic bromine" is the term used in standard names for all species belonging to the family that are represented within a given model. The list of individual species that are included in a quantity having a group chemical standard name can vary between models. Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute. Standard names that use the term "brox" are used for quantities that contain all inorganic bromine species except HBr and BrONO2. kg m-3 - Mass concentration means mass per unit volume and is used in the construction mass_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. "Inorganic chlorine", sometimes referred to as Cly, describes a family of chemical species which result from the degradation of source gases containing chlorine (CFCs, HCFCs, VSLS) and natural inorganic chlorine sources such as seasalt and other aerosols. "Inorganic chlorine" is the term used in standard names for all species belonging to the family that are represented within a given model. The list of individual species that are included in a quantity having a group chemical standard name can vary between models. Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute. Standard names that use the term "clox" are used for quantities that contain all inorganic chlorine species except HCl and ClONO2. + "Mass concentration" means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". "Inorganic chlorine", sometimes referred to as Cly, describes a family of chemical species which result from the degradation of source gases containing chlorine (CFCs, HCFCs, VSLS) and natural inorganic chlorine sources such as sea salt and other aerosols. "Inorganic chlorine" is the term used in standard names for all species belonging to the family that are represented within a given model. The list of individual species that are included in a quantity having a group chemical standard name can vary between models. Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute. Standard names that use the term "clox" are used for quantities that contain all inorganic chlorine species except HCl and ClONO2. @@ -8291,18 +9149,32 @@ totals are summed to obtain the index. 
'Mass concentration' means mass per unit volume and is used in the construction mass_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. 'Inorganic nitrogen' describes a family of chemical species which, in an ocean model, usually includes nitrite, nitrate and ammonium which act as nitrogen nutrients. 'Inorganic nitrogen' is the term used in standard names for all species belonging to the family that are represented within a given model. The list of individual species that are included in a quantity having a group chemical standard name can vary between models. Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute. + + kg m-3 + + + "Mass concentration" means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". Iron means iron in all chemical forms, commonly referred to as "total iron". "Sea floor sediment" is sediment deposited at the sea bed. + + kg m-3 - Mass concentration means mass per unit volume and is used in the construction mass_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for isoprene is CH2=C(CH3)CH=CH2. The IUPAC name for isoprene is 2-methyl-buta-1,3-diene. Isoprene is a member of the group of hydrocarbons known as terpenes. There are standard names for the terpene group as well as for some of the individual species. + "Mass concentration" means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for isoprene is CH2=C(CH3)CH=CH2. The IUPAC name for isoprene is 2-methylbuta-1,3-diene. Isoprene is a member of the group of hydrocarbons known as terpenes. There are standard names for the terpene group as well as for some of the individual species. + + + + kg m-3 + + + "Mass concentration" means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". Lead means lead in all chemical forms, commonly referred to as "total lead". "Sea floor sediment" is sediment deposited at the sea bed. kg m-3 - Mass concentration means mass per unit volume and is used in the construction mass_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for limonene is C10H16. The IUPAC name for limonene is 1-methyl-4-prop-1-en-2-yl-cyclohexene. Limonene is a member of the group of hydrocarbons known as terpenes. There are standard names for the terpene group as well as for some of the individual species. 
+ "Mass concentration" means mass per unit volume and is used in the construction mass_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for limonene is C10H16. The IUPAC name for limonene is 1-methyl-4-prop-1-en-2-ylcyclohexene. Limonene is a member of the group of hydrocarbons known as terpenes. There are standard names for the terpene group as well as for some of the individual species. @@ -8312,6 +9184,27 @@ totals are summed to obtain the index. Mass concentration means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The mass concentration of liquid water takes into account all cloud droplets and liquid precipitation regardless of drop size or fall speed. + + kg m-3 + + + "Mass concentration" means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". Lithium means lithium in all chemical forms, commonly referred to as "total lithium". "Sea floor sediment" is sediment deposited at the sea bed. + + + + kg m-3 + + + "Mass concentration" means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula of lutein is C40H56O2. + + + + kg m-3 + + + "Mass concentration" means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". Manganese means manganese in all chemical forms, commonly referred to as "total manganese". "Sea floor sediment" is sediment deposited at the sea bed. + + kg m-3 @@ -8319,6 +9212,13 @@ totals are summed to obtain the index. Mass concentration means mass per unit volume and is used in the construction mass_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. Aerosol particles take up ambient water (a process known as hygroscopic growth) depending on the relative humidity and the composition of the particles. "Dry aerosol particles" means aerosol particles without any water uptake. + + kg m-3 + + + "Mass concentration" means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". Mercury means mercury in all chemical forms, commonly referred to as "total mercury". 
"Sea floor sediment" is sediment deposited at the sea bed. + + kg m-3 @@ -8358,14 +9258,21 @@ totals are summed to obtain the index. kg m-3 - Mass concentration means mass per unit volume and is used in the construction mass_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for methyl_peroxy_radical is CH3O2. In chemistry, a 'radical' is a highly reactive, and therefore shortlived, species. + "Mass concentration" means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for methyl_peroxy_radical is CH3O2. In chemistry, a "radical" is a highly reactive, and therefore short lived, species. + + + + kg m-3 + + + Mass concentration means mass per unit volume and is used in the construction mass_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. Chlorophylls are the green pigments found in most plants, algae and cyanobacteria; their presence is essential for photosynthesis to take place. There are several different forms of chlorophyll that occur naturally. All contain a chlorin ring (chemical formula C20H16N4) which gives the green pigment and a side chain whose structure varies. The naturally occurring forms of chlorophyll contain between 35 and 55 carbon atoms. Microphytoplankton are phytoplankton between 20 and 200 micrometers in size. Phytoplankton are algae that grow where there is sufficient light to support photosynthesis. kg m-3 - Mass concentration means mass per unit volume and is used in the construction mass_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. Chlorophylls are the green pigments found in most plants, algae and cyanobacteria; their presence is essential for photosynthesis to take place. There are several different forms of chlorophyll that occur naturally. All contain a chlorin ring (chemical formula C20H16N4) which gives the green pigment and a side chain whose structure varies. The naturally occurring forms of chlorophyll contain between 35 and 55 carbon atoms. 'Miscellaneous phytoplankton' are all those phytoplankton that are not diatoms, diazotrophs, calcareous phytoplankton, picophytoplankton or other seperately named components of the phytoplankton population. 
Phytoplankton are autotrophic prokaryotic or eukaryotic algae that live near the water surface where there is sufficient light to support photosynthesis. + Mass concentration means mass per unit volume and is used in the construction mass_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. Chlorophylls are the green pigments found in most plants, algae and cyanobacteria; their presence is essential for photosynthesis to take place. There are several different forms of chlorophyll that occur naturally. All contain a chlorin ring (chemical formula C20H16N4) which gives the green pigment and a side chain whose structure varies. The naturally occurring forms of chlorophyll contain between 35 and 55 carbon atoms. "Miscellaneous phytoplankton" are all those phytoplankton that are not diatoms, diazotrophs, calcareous phytoplankton, picophytoplankton or other separately named components of the phytoplankton population. Phytoplankton are algae that grow where there is sufficient light to support photosynthesis. @@ -8375,39 +9282,67 @@ totals are summed to obtain the index. Mass concentration means mass per unit volume and is used in the construction mass_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for molecular hydrogen is H2. - + kg m-3 - Mass concentration means mass per unit volume and is used in the construction mass_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. Aerosol particles take up ambient water (a process known as hygroscopic growth) depending on the relative humidity and the composition of the particles. "Dry aerosol particles" means aerosol particles without any water uptake. The chemical formula for the nitrate anion is NO3-. + "Mass concentration" means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". - + kg m-3 - Mass concentration means mass per unit volume and is used in the construction mass_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. In chemistry, a 'radical' is a highly reactive, and therefore shortlived, species. + Mass concentration means mass per unit volume and is used in the construction mass_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". 
The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. Chlorophylls are the green pigments found in most plants, algae and cyanobacteria; their presence is essential for photosynthesis to take place. There are several different forms of chlorophyll that occur naturally. All contain a chlorin ring (chemical formula C20H16N4) which gives the green pigment and a side chain whose structure varies. The naturally occurring forms of chlorophyll contain between 35 and 55 carbon atoms. Nanophytoplankton are phytoplankton between 2 and 20 micrometers in size. Phytoplankton are algae that grow where there is sufficient light to support photosynthesis. - + kg m-3 - Mass concentration means mass per unit volume and is used in the construction mass_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for nitric acid is HNO3. + "Mass concentration" means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". Nickel means nickel in all chemical forms, commonly referred to as "total nickel". "Sea floor sediment" is sediment deposited at the sea bed. - + kg m-3 - Mass concentration means mass per unit volume and is used in the construction mass_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. "Ambient_aerosol" means that the aerosol is measured or modelled at the ambient state of pressure, temperature and relative humidity that exists in its immediate environment. "Ambient aerosol particles" are aerosol particles that have taken up ambient water through hygroscopic growth. The extent of hygroscopic growth depends on the relative humidity and the composition of the particles. The chemical formula for nitric acid is HNO3. Nitric acid trihydrate, sometimes referred to as NAT, is a stable crystalline substance consisting of three molecules of water to one molecule of nitric acid. + Mass concentration means mass per unit volume and is used in the construction mass_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. Aerosol particles take up ambient water (a process known as hygroscopic growth) depending on the relative humidity and the composition of the particles. "Dry aerosol particles" means aerosol particles without any water uptake. The chemical formula for the nitrate anion is NO3-. - + kg m-3 - Mass concentration means mass per unit volume and is used in the construction mass_concentration_of_X_in_Y, where X is a material constituent of Y. 
A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for nitrogen dioxide is NO2. + "Mass concentration" means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". In chemistry, a "radical" is a highly reactive, and therefore short lived, species. + + + + kg m-3 + + + Mass concentration means mass per unit volume and is used in the construction mass_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for nitric acid is HNO3. + + + + kg m-3 + + + Mass concentration means mass per unit volume and is used in the construction mass_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. "Ambient_aerosol" means that the aerosol is measured or modelled at the ambient state of pressure, temperature and relative humidity that exists in its immediate environment. "Ambient aerosol particles" are aerosol particles that have taken up ambient water through hygroscopic growth. The extent of hygroscopic growth depends on the relative humidity and the composition of the particles. The chemical formula for nitric acid is HNO3. Nitric acid trihydrate, sometimes referred to as NAT, is a stable crystalline substance consisting of three molecules of water to one molecule of nitric acid. + + + + kg m-3 + + + Mass concentration means mass per unit volume and is used in the construction mass_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for nitrogen dioxide is NO2. + + + + kg m-3 + + + "Mass concentration" means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". Total nitrogen means nitrogen in all chemical forms. "Sea floor sediment" is sediment deposited at the sea bed. @@ -8452,6 +9387,41 @@ totals are summed to obtain the index. Mass concentration means mass per unit volume and is used in the construction mass_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. "Noy" describes a family of chemical species. The family usually includes atomic nitrogen (N), nitrogen monoxide (NO), nitrogen dioxide (NO2), dinitrogen pentoxide (N2O5), nitric acid (HNO3), peroxynitric acid (HNO4), bromine nitrate (BrONO2) , chlorine nitrate (ClONO2) and organic nitrates (most notably peroxyacetyl nitrate, sometimes referred to as PAN, (CH3COO2NO2)). 
The list of individual species that are included in a quantity having a group chemical standard name can vary between models. Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute. The phrase 'expressed_as' is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. + + kg m-3 + + + "Mass concentration" means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. Aerosol particles take up ambient water (a process known as hygroscopic growth) depending on the relative humidity and the composition of the particles. "Dry aerosol particles" means aerosol particles without any water uptake. Chemically, "organic carbon aerosol" refers to the carbonaceous fraction of particulate matter contained in any of the vast number of compounds where carbon is chemically combined with hydrogen and other elements like O, S, N, P, Cl, etc. In measurements of carbonaceous aerosols, organic carbon samples may also include some inorganic carbon compounds, whose mass is neglected and assumed to be distributed between the elemental and organic carbon components of the aerosol particles. Reference: Petzold, A., Ogren, J. A., Fiebig, M., Laj, P., Li, S.-M., Baltensperger, U., Holzer-Popp, T., Kinne, S., Pappalardo, G., Sugimoto, N., Wehrli, C., Wiedensohler, A., and Zhang, X.-Y.: Recommendations for reporting "black carbon" measurements, Atmos. Chem. Phys., 13, 8365–8379, https://doi.org/10.5194/acp-13-8365-2013, 2013. + + + + kg m-3 + + + "Mass concentration" means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. Aerosol particles take up ambient water (a process known as hygroscopic growth) depending on the relative humidity and the composition of the particles. "Dry aerosol particles" means aerosol particles without any water uptake. "Pm10 aerosol" means atmospheric particulate compounds with an aerodynamic diameter of less than or equal to 10 micrometers. Chemically, "organic carbon aerosol" refers to the carbonaceous fraction of particulate matter contained in any of the vast number of compounds where carbon is chemically combined with hydrogen and other elements like O, S, N, P, Cl, etc. In measurements of carbonaceous aerosols, organic carbon samples may also include some inorganic carbon compounds, whose mass is neglected and assumed to be distributed between the elemental and organic carbon components of the aerosol particles. Reference: Petzold, A., Ogren, J. 
A., Fiebig, M., Laj, P., Li, S.-M., Baltensperger, U., Holzer-Popp, T., Kinne, S., Pappalardo, G., Sugimoto, N., Wehrli, C., Wiedensohler, A., and Zhang, X.-Y.: Recommendations for reporting "black carbon" measurements, Atmos. Chem. Phys., 13, 8365–8379, https://doi.org/10.5194/acp-13-8365-2013, 2013. + + + + kg m-3 + + + "Mass concentration" means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". Aerosol particles take up ambient water (a process known as hygroscopic growth) depending on the relative humidity and the composition of the particles. "Dry aerosol particles" means aerosol particles without any water uptake. "Pm1 aerosol" means atmospheric particulate compounds with an aerodynamic diameter of less than or equal to 1 micrometer. Chemically, "organic carbon aerosol" refers to the carbonaceous fraction of particulate matter contained in any of the vast number of compounds where carbon is chemically combined with hydrogen and other elements like O, S, N, P, Cl, etc. In measurements of carbonaceous aerosols, organic carbon samples may also include some inorganic carbon compounds, whose mass is neglected and assumed to be distributed between the elemental and organic carbon components of the aerosol particles. Reference: Petzold, A., Ogren, J. A., Fiebig, M., Laj, P., Li, S.-M., Baltensperger, U., Holzer-Popp, T., Kinne, S., Pappalardo, G., Sugimoto, N., Wehrli, C., Wiedensohler, A., and Zhang, X.-Y.: Recommendations for reporting "black carbon" measurements, Atmos. Chem. Phys., 13, 8365–8379, https://doi.org/10.5194/acp-13-8365-2013, 2013. + + + + kg m-3 + + + "Mass concentration" means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. Aerosol particles take up ambient water (a process known as hygroscopic growth) depending on the relative humidity and the composition of the particles. "Dry aerosol particles" means aerosol particles without any water uptake. "Pm2p5 aerosol" means atmospheric particulate compounds with an aerodynamic diameter of less than or equal to 2.5 micrometers. Chemically, "organic carbon aerosol" refers to the carbonaceous fraction of particulate matter contained in any of the vast number of compounds where carbon is chemically combined with hydrogen and other elements like O, S, N, P, Cl, etc. In measurements of carbonaceous aerosols, organic carbon samples may also include some inorganic carbon compounds, whose mass is neglected and assumed to be distributed between the elemental and organic carbon components of the aerosol particles. Reference: Petzold, A., Ogren, J. A., Fiebig, M., Laj, P., Li, S.-M., Baltensperger, U., Holzer-Popp, T., Kinne, S., Pappalardo, G., Sugimoto, N., Wehrli, C., Wiedensohler, A., and Zhang, X.-Y.: Recommendations for reporting "black carbon" measurements, Atmos. Chem. Phys., 13, 8365–8379, https://doi.org/10.5194/acp-13-8365-2013, 2013. 
+ + + + kg m-3 + + + "Mass concentration" means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". Organic carbon describes a family of chemical species and is the term used in standard names for all species belonging to the family that are represented within a given model. The list of individual species that are included in a quantity having a group chemical standard name can vary between models. Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute. "Sea floor sediment" is sediment deposited at the sea bed. + + kg m-3 @@ -8494,6 +9464,13 @@ totals are summed to obtain the index. Mass concentration means mass per unit volume and is used in the construction mass_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. Aerosol takes up ambient water (a process known as hygroscopic growth) depending on the relative humidity and the composition of the aerosol. "Dry aerosol particles" means aerosol particles without any water uptake. The term "particulate_organic_matter_dry_aerosol" means all particulate organic matter dry aerosol except elemental carbon. It is the sum of primary_particulate_organic_matter_dry_aerosol and secondary_particulate_organic_matter_dry_aerosol. + + kg m-3 + + + "Mass concentration" means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The equivalent term in the NERC P01 Parameter Usage Vocabulary may be found at http://vocab.nerc.ac.uk/collection/P01/current/PERDXXXX/2/. + + kg m-3 @@ -8512,7 +9489,7 @@ totals are summed to obtain the index. kg m-3 - Mass concentration means mass per unit volume and is used in the construction mass_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The term "peroxy_radicals" means all organic and inorganic peroxy radicals. This includes HO2 and all organic peroxy radicals, sometimes referred to as RO2. In chemistry, a 'radical' is a highly reactive, and therefore shortlived, species. + "Mass concentration" means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The term "peroxy_radicals" means all organic and inorganic peroxy radicals. This includes HO2 and all organic peroxy radicals, sometimes referred to as RO2. In chemistry, a "radical" is a highly reactive, and therefore short lived, species. @@ -8522,6 +9499,27 @@ totals are summed to obtain the index. 
Mass concentration means mass per unit volume and is used in the construction mass_concentration_of_X_in_Y, where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". Petroleum hydrocarbons are compounds containing just carbon and hydrogen originating from the fossil fuel crude oil. + + kg m-3 + + + Concentration of phaeopigment per unit volume of the water body, where the filtration size or collection method is unspecified (equivalent term in the NERC P01 Parameter Usage Vocabulary may be found at http://vocab.nerc.ac.uk/collection/P01/current/). "Mass concentration" means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". Phaeopigments are a group of non-photosynthetic pigments that are the degradation product of algal chlorophyll pigments. Phaeopigments contain phaeophytin, which fluoresces in response to excitation light, and phaeophorbide, which is colorless and does not fluoresce (source: https://academic.oup.com/plankt/article/24/11/1221/1505482). Phaeopigment concentration commonly increases during the development phase of marine phytoplankton blooms, and declines in the post bloom stage (source: https://www.sciencedirect.com/science/article/pii/0967063793901018). "Sea floor sediment" is sediment deposited at the sea bed. + + + + kg m-3 + + + "Mass concentration" means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". Phaeopigments are non-photosynthetic pigments that are the degradation product of algal chlorophyll pigments. They are commonly formed during and after marine phytoplankton blooms. "Sea ice" means all ice floating in the sea which has formed from freezing sea water, rather than by other processes such as calving of land ice to form icebergs. + + + + kg m-3 + + + Concentration of phaeopigment per unit volume of the water body, where the filtration size or collection method is unspecified (equivalent term in the NERC P01 Parameter Usage Vocabulary may be found at http://vocab.nerc.ac.uk/collection/P01/current/). "Mass concentration" means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". Phaeopigments are a group of non-photosynthetic pigments that are the degradation product of algal chlorophyll pigments. Phaeopigments contain phaeophytin, which fluoresces in response to excitation light, and phaeophorbide, which is colorless and does not fluoresce (source: https://academic.oup.com/plankt/article/24/11/1221/1505482). Phaeopigment concentration commonly increases during the development phase of marine phytoplankton blooms, and declines in the post bloom stage (source: https://www.sciencedirect.com/science/article/pii/0967063793901018). + + kg m-3 @@ -8529,18 +9527,25 @@ totals are summed to obtain the index. 
Mass concentration means mass per unit volume and is used in the construction mass_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. + + kg m-3 + + + "Mass concentration" means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". Phosphorus means phosphorus in all chemical forms, commonly referred to as "total phosphorus". "Sea floor sediment" is sediment deposited at the sea bed. + + kg m-3 - Mass concentration means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. Phytoplankton are autotrophic prokaryotic or eukaryotic algae that live near the water surface where there is sufficient light to support photosynthesis. Standard names also exist for the mass concentration of a number of components that make up the total phytoplankton population, such as diatoms, diazotrophs, calcareous phytoplankton, picophytoplankton and miscellaneous phytoplankton. Chlorophylls are the green pigments found in most plants, algae and cyanobacteria; their presence is essential for photosynthesis to take place. There are several different forms of chlorophyll that occur naturally. All contain a chlorin ring (chemical formula C20H16N4) which gives the green pigment and a side chain whose structure varies. The naturally occurring forms of chlorophyll contain between 35 and 55 carbon atoms. + Mass concentration means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. Phytoplankton are algae that grow where there is sufficient light to support photosynthesis. Standard names also exist for the mass concentration of a number of components that make up the total phytoplankton population, such as diatoms, diazotrophs, calcareous phytoplankton, picophytoplankton and miscellaneous phytoplankton. Chlorophylls are the green pigments found in most plants, algae and cyanobacteria; their presence is essential for photosynthesis to take place. There are several different forms of chlorophyll that occur naturally. All contain a chlorin ring (chemical formula C20H16N4) which gives the green pigment and a side chain whose structure varies. The naturally occurring forms of chlorophyll contain between 35 and 55 carbon atoms. 
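All of the entries above share the "mass_concentration_of_X_in_Y" construction with canonical units of kg m-3. As a minimal sketch of how a data producer attaches one of these vocabulary terms to a variable (assuming the netCDF4-python library; the file name, variable name, and values below are illustrative only, and "mass_concentration_of_chlorophyll_a_in_sea_water" is used as a representative CF name):

import netCDF4
import numpy as np

# Toy dataset with a single depth dimension; all names here are examples.
ds = netCDF4.Dataset("example.nc", "w")
ds.createDimension("depth", 10)
chl = ds.createVariable("chl", "f4", ("depth",))
# The standard_name attribute carries the vocabulary term; the units
# attribute should be convertible to the canonical units, kg m-3.
chl.standard_name = "mass_concentration_of_chlorophyll_a_in_sea_water"
chl.units = "kg m-3"
chl[:] = np.linspace(5e-7, 1e-8, 10)
ds.close()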
kg m-3 - Mass concentration means mass per unit volume and is used in the construction mass_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. Chlorophylls are the green pigments found in most plants, algae and cyanobacteria; their presence is essential for photosynthesis to take place. There are several different forms of chlorophyll that occur naturally. All contain a chlorin ring (chemical formula C20H16N4) which gives the green pigment and a side chain whose structure varies. The naturally occurring forms of chlorophyll contain between 35 and 55 carbon atoms. Picophytoplankton are phytoplankton of less than 2 micrometers in size. Phytoplankton are autotrophic prokaryotic or eukaryotic algae that live near the water surface where there is sufficient light to support photosynthesis. + Mass concentration means mass per unit volume and is used in the construction mass_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. Chlorophylls are the green pigments found in most plants, algae and cyanobacteria; their presence is essential for photosynthesis to take place. There are several different forms of chlorophyll that occur naturally. All contain a chlorin ring (chemical formula C20H16N4) which gives the green pigment and a side chain whose structure varies. The naturally occurring forms of chlorophyll contain between 35 and 55 carbon atoms. Picophytoplankton are phytoplankton of less than 2 micrometers in size. Phytoplankton are algae that grow where there is sufficient light to support photosynthesis. @@ -8592,6 +9597,13 @@ totals are summed to obtain the index. Mass concentration means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. Aerosol particles take up ambient water (a process known as hygroscopic growth) depending on the relative humidity and the composition of the particles. "Dry aerosol particles" means aerosol particles without any water uptake. "Pm2p5 aerosol" means atmospheric particulate compounds with an aerodynamic diameter of less than or equal to 2.5 micrometers. + + kg m-3 + + + "Mass concentration" means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. 
A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula of prasinoxanthin is C40H56O4. The equivalent term in the NERC P01 Parameter Usage Vocabulary may be found at http://vocab.nerc.ac.uk/collection/P01/current/PXAPXXXX/2/. + + kg m-3 @@ -8599,6 +9611,13 @@ totals are summed to obtain the index. Mass concentration means mass per unit volume and is used in the construction mass_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. Aerosol takes up ambient water (a process known as hygroscopic growth) depending on the relative humidity and the composition of the aerosol. "Dry aerosol particles" means aerosol particles without any water uptake. "Primary particulate organic matter " means all organic matter emitted directly to the atmosphere as particles except elemental carbon. The sum of primary_particulate_organic_matter_dry_aerosol and secondary_particulate_organic_matter_dry_aerosol is particulate_organic_matter_dry_aerosol. + + kg m-3 + + + "Mass concentration" means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The phrase "expressed_as" is used in the construction "A_expressed_as_B", where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. "Prokaryotes" means all Bacteria and Archaea excluding photosynthetic cyanobacteria such as Synechococcus and Prochlorococcus or other separately named components of the prokaryotic population. + + kg m-3 @@ -8655,6 +9674,13 @@ totals are summed to obtain the index. Mass concentration means mass per unit volume and is used in the construction mass_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. + + kg m-3 + + + "Mass concentration" means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". Silver means silver in all chemical forms, commonly referred to as "total silver". "Sea floor sediment" is sediment deposited at the sea bed. + + kg m-3 @@ -8694,7 +9720,21 @@ totals are summed to obtain the index. kg m-3 - Mass concentration means mass per unit volume and is used in the construction mass_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for toluene is C6H5CH3. Toluene has the same structure as benzene, except that one of the hydrogen atoms is replaced by a methyl group. The systematic name for toluene is methylbenzene. 
+ "Mass concentration" means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for toluene is C6H5CH3. Toluene has the same structure as benzene, except that one of the hydrogen atoms is replaced by a methyl group. The IUPAC name for toluene is methylbenzene. + + + + kg m-3 + + + "Mass concentration" means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". Vanadium means vanadium in all chemical forms, commonly referred to as "total vanadium". "Sea floor sediment" is sediment deposited at the sea bed. + + + + kg m-3 + + + "Mass concentration" means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula of violaxanthin is C40H56O4. @@ -8725,6 +9765,27 @@ totals are summed to obtain the index. Mass concentration means mass per unit volume and is used in the construction mass_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for xylene is C6H4C2H6. In chemistry, xylene is a generic term for a group of three isomers of dimethylbenzene. The IUPAC names for the isomers are 1,2-dimethylbenzene, 1,3-dimethylbenzene and 1,4-dimethylbenzene. Xylene is an aromatic hydrocarbon. There are standard names that refer to aromatic_compounds as a group, as well as those for individual species. + + kg m-3 + + + "Mass concentration" means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula of zeaxanthin is C40H56O2. The equivalent term in the NERC P01 Parameter Usage Vocabulary may be found at http://vocab.nerc.ac.uk/collection/P01/current/ZEAXXXXX/2/. + + + + kg m-3 + + + "Mass concentration" means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". Zinc means zinc in all chemical forms, commonly referred to as "total zinc". "Sea floor sediment" is sediment deposited at the sea bed. + + + + kg m-3 + + + "Mass concentration" means mass per unit volume and is used in the construction "mass_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. 
It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. "Zooplankton" means the total zooplankton population, with components such as mesozooplankton, microzooplankton and miscellaneous zooplankton. + + kg m-2 @@ -8764,7 +9825,7 @@ totals are summed to obtain the index. kg m-2 - "Content" indicates a quantity per unit area. "Layer" means any layer with upper and lower boundaries that have constant values in some vertical coordinate. There must be a vertical coordinate variable indicating the extent of the layer(s). If the layers are model layers, the vertical coordinate can be model_level_number, but it is recommended to specify a physical coordinate (in a scalar or auxiliary coordinate variable) as well. + The "content_of_X_in_atmosphere_layer" refers to the vertical integral between two specified levels in the atmosphere. "Content" indicates a quantity per unit area. "Cloud liquid water" refers to the liquid phase of cloud water. A diameter of 0.2 mm has been suggested as an upper limit to the size of drops that shall be regarded as cloud drops; larger drops fall rapidly enough so that only very strong updrafts can sustain them. Any such division is somewhat arbitrary, and active cumulus clouds sometimes contain cloud drops much larger than this. Reference: AMS Glossary http://glossary.ametsoc.org/wiki/Cloud_drop. "Layer" means any layer with upper and lower boundaries that have constant values in some vertical coordinate. There must be a vertical coordinate variable indicating the extent of the layer(s). If the layers are model layers, the vertical coordinate can be model_level_number, but it is recommended to specify a physical coordinate (in a scalar or auxiliary coordinate variable) as well. @@ -9023,42 +10084,42 @@ totals are summed to obtain the index. 1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, whereX is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for bromine chloride is BrCl. + "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for bromine chloride is BrCl. 1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, whereX is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for bromine monoxide is BrO. + "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for bromine monoxide is BrO. 1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, whereX is a material constituent of Y. 
It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for bromine nitrate is BrONO2. + "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for bromine nitrate is BrONO2. 1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, whereX is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. "Brox" describes a family of chemical species consisting of inorganic bromine compounds with the exception of hydrogen bromide (HBr) and bromine nitrate (BrONO2). "Brox" is the term used in standard names for all species belonging to the family that are represented within a given model. The list of individual species that are included in a quantity with a group chemical standard name can vary between models. Where possible, the data variable should be accompaniedby a complete description of the species represented, for example, by using a comment attribute. "Inorganic bromine", sometimes referred to as Bry, describes a family of chemical species which result from the degradation of source gases containing bromine (halons, methyl bromide, VSLS) and natural inorganic bromine sources such as volcanoes, seasalt and other aerosols. Standard names that use the term "inorganic_bromine" are usedfor quantities that contain all inorganic bromine species including HCl and ClONO2. The phrase 'expressed_as' is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. + "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. "Brox" describes a family of chemical species consisting of inorganic bromine compounds with the exception of hydrogen bromide (HBr) and bromine nitrate (BrONO2). "Brox" is the term used in standard names for all species belonging to the family that are represented within a given model. The list of individual species that are included in a quantity with a group chemical standard name can vary between models. Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute. 
"Inorganic bromine", sometimes referred to as Bry, describes a family of chemical species which result from the degradation of source gases containing bromine (halons, methyl bromide, VSLS) and natural inorganic bromine sources such as volcanoes, sea salt and other aerosols. Standard names that use the term "inorganic_bromine" are used for quantities that contain all inorganic bromine species including HBr and BrONO2. 1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, whereX is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for butane is C4H10. Butane is a member of the group of hydrocarbons known as alkanes. There are standard names for the alkane group as well as for some of the individual species. + "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for butane is C4H10. Butane is a member of the group of hydrocarbons known as alkanes. There are standard names for the alkane group as well as for some of the individual species. 1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, whereX is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for carbon dioxide is CO2. + "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for carbon dioxide is CO2. @@ -9072,77 +10133,77 @@ totals are summed to obtain the index. 1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, whereX is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula of carbon monoxide is CO. + "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula of carbon monoxide is CO. 1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, whereX is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula of carbon tetrachloride is CCl4. + "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). 
A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula of carbon tetrachloride is CCl4. The IUPAC name for carbon tetrachloride is tetrachloromethane. 1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, whereX is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula of CFC113a CCl3CF3. The IUPAC name for CFC113a is 1,1,1-trichloro-2,2,2-trifluoro-ethane. + "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula of CFC113a is CCl3CF3. The IUPAC name for CFC113a is 1,1,1-trichloro-2,2,2-trifluoroethane. 1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula of CFC113 is CCl2FCClF2. The IUPAC name for CFC113 is 1,1,2-trichloro-1,2,2-trifluoro-ethane. + "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula of CFC113 is CCl2FCClF2. The IUPAC name for CFC113 is 1,1,2-trichloro-1,2,2-trifluoroethane. 1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, whereX is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula of CFC114 is CClF2CClF2. The IUPAC name for CFC114 is 1,2-dichloro-1,1,2,2-tetrafluoro-ethane. + "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula of CFC114 is CClF2CClF2. The IUPAC name for CFC114 is 1,2-dichloro-1,1,2,2-tetrafluoroethane. 1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, whereX is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula of CFC115 is CClF2CF3. The IUPAC name for CFC115 is 1-chloro-1,1,2,2,2-pentafluoro-ethane. + "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). 
A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula of CFC115 is CClF2CF3. The IUPAC name for CFC115 is 1-chloro-1,1,2,2,2-pentafluoroethane. 1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, whereX is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula of CFC11 is CFCl3. The IUPAC name fof CFC11 is trichloro-fluoro-methane. + "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula of CFC11 is CFCl3. The IUPAC name for CFC11 is trichloro(fluoro)methane. 1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, whereX is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for CFC12 is CF2Cl2. The IUPAC name for CFC12 is dichloro-difluoro-methane. + "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for CFC12 is CF2Cl2. The IUPAC name for CFC12 is dichloro(difluoro)methane. 1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, whereX is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for chlorine dioxide is OClO. + "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for chlorine dioxide is OClO. 1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, whereX is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for chlorine monoxide is ClO. + "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for chlorine monoxide is ClO. 1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, whereX is a material constituent of Y. 
It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for chlorine nitrate is ClONO2. + "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for chlorine nitrate is ClONO2. @@ -9152,6 +10213,13 @@ totals are summed to obtain the index. Mass fraction is used in the construction mass_fraction_of_X_in_Y, where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. Chlorophylls are the green pigments found in most plants, algae and cyanobacteria; their presence is essential for photosynthesis to take place. There are several different forms of chlorophyll that occur naturally; all contain a chlorin ring which gives the green pigment and a side chain whose structure varies. Chlorophyll-a is the most commonly occurring form of natural chlorophyll. + + 1 + + + "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It is evaluated as the mass of X divided by the mass of Y (including X). It may be expressed as a fraction, a percentage, or any other dimensionless representation of a fraction. Grain-size class distribution is based on the Udden-Wentworth scale. + + 1 @@ -9170,14 +10238,14 @@ totals are summed to obtain the index. 1 clw - Mass fraction is used in the construction mass_fraction_of_X_in_Y, where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). + "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". "Cloud liquid water" refers to the liquid phase of cloud water. A diameter of 0.2 mm has been suggested as an upper limit to the size of drops that shall be regarded as cloud drops; larger drops fall rapidly enough so that only very strong updrafts can sustain them. Any such division is somewhat arbitrary, and active cumulus clouds sometimes contain cloud drops much larger than this. Reference: AMS Glossary http://glossary.ametsoc.org/wiki/Cloud_drop. 1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, whereX is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. "Clox" describes a familyof chemical species consisting of inorganic chlorine compounds with the exception of hydrogen chloride (HCl) and chlorine nitrate (ClONO2). "Clox" is the term used in standard names for all species belonging to the family that are represented within a given model. The list of individual species that are included in a quantity with a group chemical standard name can vary between models. 
Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute. "Inorganic chlorine", sometimes referred to as Cly, describes a family of chemical species which result from the degradation of source gases containing chlorine (CFCs, HCFCs, VSLS) and natural inorganic chlorine sources such as seasalt and other aerosols. Standard names that use the term "inorganic_chlorine" are used for quantitiesthat contain all inorganic chlorine species including HCl and ClONO2. The phrase 'expressed_as' is used in the construction A_expressed_as_B, where B is a chemical constituent of A.It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. + "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. "Clox" describes a family of chemical species consisting of inorganic chlorine compounds with the exception of hydrogen chloride (HCl) and chlorine nitrate (ClONO2). "Clox" is the term used in standard names for all species belonging to the family that are represented within a given model. The list of individual species that are included in a quantity with a group chemical standard name can vary between models. Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute. "Inorganic chlorine", sometimes referred to as Cly, describes a family of chemical species which result from the degradation of source gases containing chlorine (CFCs, HCFCs, VSLS) and natural inorganic chlorine sources such as sea salt and other aerosols. Standard names that use the term "inorganic_chlorine" are used for quantities that contain all inorganic chlorine species including HCl and ClONO2. @@ -9198,14 +10266,14 @@ totals are summed to obtain the index. 1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). Convective cloud is that produced by the convection schemes in an atmosphere model. + "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". Convective cloud is that produced by the convection schemes in an atmosphere model. "Cloud liquid water" refers to the liquid phase of cloud water. A diameter of 0.2 mm has been suggested as an upper limit to the size of drops that shall be regarded as cloud drops; larger drops fall rapidly enough so that only very strong updrafts can sustain them. Any such division is somewhat arbitrary, and active cumulus clouds sometimes contain cloud drops much larger than this. 
Reference: AMS Glossary http://glossary.ametsoc.org/wiki/Cloud_drop. 1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, whereX is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for dichlorineperoxide is Cl2O2. + "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for dichlorine peroxide is Cl2O2. @@ -9219,7 +10287,7 @@ totals are summed to obtain the index. 1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, whereX is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for dinitrogenpentoxide is N2O5. + "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for dinitrogen pentoxide is N2O5. @@ -9240,42 +10308,42 @@ totals are summed to obtain the index. 1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, whereX is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for ethane is C2H6. Ethane is a member of the group of hydrocarbons known as alkanes. There are standardnames for the alkane group as well as for some of the individual species. + "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for ethane is C2H6. Ethane is a member of the group of hydrocarbons known as alkanes. There are standard names for the alkane group as well as for some of the individual species. 1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, whereX is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for ethanol isC2H5OH. + "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for ethanol is C2H5OH. 
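The "mass_fraction_of_X_in_Y" entries reworded in these hunks are dimensionless, with canonical units of 1. A minimal sketch of the same pattern for a mass-fraction quantity (again assuming netCDF4-python; the file name, variable name, and values are illustrative, and "mass_fraction_of_ozone_in_air" is assumed as a representative CF name):

import netCDF4

# Toy dataset with a single model-level dimension; names are examples.
ds = netCDF4.Dataset("mass_fraction_example.nc", "w")
ds.createDimension("model_level_number", 5)
o3 = ds.createVariable("o3", "f4", ("model_level_number",))
# A mass fraction is the ratio of the mass of X to the mass of Y
# (including X), so the units are the dimensionless "1".
o3.standard_name = "mass_fraction_of_ozone_in_air"
o3.units = "1"
o3[:] = [1.2e-6, 3.4e-6, 6.8e-6, 9.1e-6, 7.5e-6]
ds.close()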
1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, whereX is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for ethene is C2H4. Ethene is a member of the group of hydrocarbons known as alkenes. There are standardnames for the alkene group as well as for some of the individual species. + "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for ethene is C2H4. Ethene is a member of the group of hydrocarbons known as alkenes. There are standard names for the alkene group as well as for some of the individual species. 1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, whereX is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for ethyne is HC2H. Ethyne is the IUPAC name for this species, which is also commonly known as acetylene. + "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for ethyne is HC2H. Ethyne is the IUPAC name for this species, which is also commonly known as acetylene. 1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, whereX is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for formaldehyde is CH2O. The IUPAC name for formaldehyde is methanal. + "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for formaldehyde is CH2O. 1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, whereX is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for formic acid is HCOOH. The IUPAC name for formic acid is methanoic acid. + "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for formic acid is HCOOH. The IUPAC name for formic acid is methanoic acid. 
@@ -9289,14 +10357,14 @@ totals are summed to obtain the index. 1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, whereX is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. "Divalent mercury" means all compounds in which the mercury has two binding sites to other ion(s) in a salt or to other atom(s) in a molecule. + "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". "Divalent mercury" means all compounds in which the mercury has two binding sites to other ion(s) in a salt or to other atom(s) in a molecule. 1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, whereX is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical symbol for mercury is Hg. + "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical symbol for mercury is Hg. @@ -9313,6 +10381,13 @@ totals are summed to obtain the index. Mass fraction is used in the construction mass_fraction_of_X_in_Y, where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). Graupel consists of heavily rimed snow particles, often called snow pellets; often indistinguishable from very small soft hail except when the size convention that hail must have a diameter greater than 5 mm is adopted. Reference: American Meteorological Society Glossary http://glossary.ametsoc.org/wiki/Graupel. There are also separate standard names for hail. Standard names for "graupel_and_hail" should be used to describe data produced by models that do not distinguish between hail and graupel. + + 1 + + + "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It is evaluated as the mass of X divided by the mass of Y (including X). It may be expressed as a fraction, a percentage, or any other dimensionless representation of a fraction. Grain-size class distribution is based on the Udden-Wentworth scale. + + 1 @@ -9324,168 +10399,175 @@ totals are summed to obtain the index. 1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, whereX is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for halon1202 is CBr2F2. The IUPAC name for halon1202 is dibromo-difluoro-methane. + "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X).
A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for Halon1202 is CBr2F2. The IUPAC name for Halon1202 is dibromo(difluoro)methane. 1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, whereX is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for halon1211 is CBrClF2. The IUPAC name for halon1211 is bromo-chloro-difluoro-methane. + "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for Halon1211 is CBrClF2. The IUPAC name for Halon1211 is bromo-chloro-difluoromethane. 1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, whereX is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for halon1301 is CBrF3. The IUPAC name for halon1301 is bromo-trifluoro-methane. + "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for Halon1301 is CBrF3. The IUPAC name for Halon1301 is bromo(trifluoro)methane. 1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, whereX is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for halo2402 is C2Br2F4. The IUPAC name for halon2402 is 1,2-dibromo-1,1,2,2-tetrafluoro-ethane. + "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for Halon2402 is C2Br2F4. The IUPAC name for Halon2402 is 1,2-dibromo-1,1,2,2-tetrafluoroethane. 1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, whereX is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for hcc140a isCH3CCl3. The IUPAC name for hcc140a is 1,1,1-trichloro-ethane. + "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). 
A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for HCC140a, also called methyl chloroform, is CH3CCl3. The IUPAC name for HCC140a is 1,1,1-trichloroethane. 1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, whereX is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for HCFC141b is CH3CCl2F. The IUPAC name for HCFC141b is 1,1-dichloro-1-fluoroethane. + "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for HCFC141b is CH3CCl2F. The IUPAC name for HCFC141b is 1,1-dichloro-1-fluoroethane. 1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, whereX is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for HCFC142b is CH3CClF2. The IUPAC name for HCFC142b is 1-chloro-1,1-difluoroethane. + "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for HCFC142b is CH3CClF2. The IUPAC name for HCFC142b is 1-chloro-1,1-difluoroethane. 1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, whereX is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for HCFC22 is CHClF2. The IUPAC name for HCFC22 is chloro-difluoro-methane. + "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for HCFC22 is CHClF2. The IUPAC name for HCFC22 is chloro(difluoro)methane. 1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, whereX is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for hexachlorobiphenyl is C12H4Cl6. This structure of this species consists of two linked benzene rings,each of which is additionally bonded to three chlorine atoms. + "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). 
A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for hexachlorobiphenyl is C12H4Cl6. The structure of this species consists of two linked benzene rings, each of which is additionally bonded to three chlorine atoms. 1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, whereX is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. "HOx" means a combination of two radical species containing hydrogen and oxygen: OH and HO2. The phrase 'expressed_as' is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. + "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. "HOx" means a combination of two radical species containing hydrogen and oxygen, OH and HO2. 1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, whereX is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for hydrogen bromide is HBr. + "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for hydrogen bromide is HBr. 1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, whereX is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for hydrogen chloride is HCl. + "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for hydrogen chloride is HCl. 1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, whereX is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. 
The chemical formula for hydrogen cyanide is HCN. + "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for hydrogen cyanide is HCN. 1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, whereX is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for hydrogen peroxide is H2O2. + "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for hydrogen peroxide is H2O2. 1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, whereX is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for the hydroperoxyl radical is HO2. In chemistry, a 'radical' is a highly reactive, and therefore shortlived, species. + "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for the hydroperoxyl radical is HO2. In chemistry, a "radical" is a highly reactive, and therefore short lived, species. 1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, whereX is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for the hydroxyl radical is OH. In chemistry, a 'radical' is a highly reactive, and therefore shortlived,species. + "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for the hydroxyl radical is OH. In chemistry, a "radical" is a highly reactive, and therefore short lived, species. 1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, whereX is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for hypobromous acid is HOBr. + "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X).
A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for hypobromous acid is HOBr. 1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, whereX is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for hypochlorous acid is HOCl. + "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for hypochlorous acid is HOCl. 1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, whereX is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. "Inorganic bromine", sometimes referred to as Bry, describes a family of chemical species which result from the degradation of source gases containing bromine (halons, methyl bromide, VSLS) and natural inorganic bromine sources such as volcanoes, seasalt and other aerosols. "Inorganic bromine" is the term used in standard names for all species belonging to the family that are represented within a given model. The list of individual species that are included in a quantity having a group chemical standard name can vary between models. Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute. Standard names that use the term "brox" are used for quantities that contain all inorganic bromine species except HBr and BrONO2. + "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". "Inorganic bromine", sometimes referred to as Bry, describes a family of chemical species which result from the degradation of source gases containing bromine (halons, methyl bromide, VSLS) and natural inorganic bromine sources such as volcanoes, sea salt and other aerosols. "Inorganic bromine" is the term used in standard names for all species belonging to the family that are represented within a given model. The list of individual species that are included in a quantity having a group chemical standard name can vary between models. Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute. Standard names that use the term "brox" are used for quantities that contain all inorganic bromine species except HBr and BrONO2. 1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, whereX is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. 
"Inorganic chlorine", sometimes referred to as Cly, describes a family of chemical species which result from the degradation of source gases containing chlorine (CFCs, HCFCs, VSLS) and natural inorganic chlorine sources such as seasalt and other aerosols. "Inorganic chlorine" is the termused in standard names for all species belonging to the family that are represented withina given model. The list of individual species that are included in a quantity having a group chemical standard name can vary between models. Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute. Standard names that use the term "clox" are used for quantities that contain all inorganic chlorine species except HCl and ClONO2. + "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". "Inorganic chlorine", sometimes referred to as Cly, describes a family of chemical species which result from the degradation of source gases containing chlorine (CFCs, HCFCs, VSLS) and natural inorganic chlorine sources such as sea salt and other aerosols. "Inorganic chlorine" is the term used in standard names for all species belonging to the family that are represented within a given model. The list of individual species that are included in a quantity having a group chemical standard name can vary between models. Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute. Standard names that use the term "clox" are used for quantities that contain all inorganic chlorine species except HCl and ClONO2. 1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, whereX is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for isoprene is CH2=C(CH3)CH=CH2. The IUPAC name for isoprene is 2-methyl-buta-1,3-diene. Isoprene is a member of the group of hydrocarbons known as terpenes. There are standard names for the terpene group as well as for some of the individual species. + "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for isoprene is CH2=C(CH3)CH=CH2. The IUPAC name for isoprene is 2-methylbuta-1,3-diene. Isoprene is a member of the group of hydrocarbons known as terpenes. There are standard names for the terpene group as well as for some of the individual species. 1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, whereX is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for limonene is C10H16. The IUPAC name for limonene is 1-methyl-4-prop-1-en-2-yl-cyclohexene. 
Limonene is a member of the group of hydrocarbons known as terpenes. There are standard names for theterpene group as well as for some of the individual species. + "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for limonene is C10H16. The IUPAC name for limonene is 1-methyl-4-prop-1-en-2-ylcyclohexene. Limonene is a member of the group of hydrocarbons known as terpenes. There are standard names for the terpene group as well as for some of the individual species. - + 1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. Aerosol particles take up ambient water (a process known as hygroscopic growth) depending on the relative humidity and the composition of the particles. "Dry aerosol particles" means aerosol particles without any water uptake. + "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". "Liquid_precipitation" includes both "rain" and "drizzle". "Rain" means drops of water falling through the atmosphere that have a diameter greater than 0.5 mm. "Drizzle" means drops of water falling through the atmosphere that have a diameter typically in the range 0.2-0.5 mm. + + + + 1 + + + "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. Aerosol particles take up ambient water (a process known as hygroscopic growth) depending on the relative humidity and the composition of the particles. "Dry aerosol particles" means aerosol particles without any water uptake. 1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, whereX is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for methane isCH4. Methane is a member of the group of hydrocarbons known as alkanes. There are standard names for the alkane group as well as for some of the individual species. + "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for methane is CH4. 
Methane is a member of the group of hydrocarbons known as alkanes. There are standard names for the alkane group as well as for some of the individual species. @@ -9499,42 +10581,42 @@ totals are summed to obtain the index. 1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, whereX is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for ethanol isC2H5OH. The chemical formula for methanol is CH3OH. + "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for ethanol is C2H5OH. The chemical formula for methanol is CH3OH. 1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, whereX is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for methyl bromide is CH3Br. The IUPAC name for methyl bromide is bromomethane. + "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for methyl bromide is CH3Br. The IUPAC name for methyl bromide is bromomethane. 1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, whereX is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for methyl chloride is CH3Cl. The IUPAC name for methyl chloride is chloromethane. + "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for methyl chloride is CH3Cl. The IUPAC name for methyl chloride is chloromethane. 1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, whereX is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for methyl hydroperoxide is CH3OOH. + "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for methyl hydroperoxide is CH3OOH. 
1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, whereX is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for methyl_peroxy_radical is CH3O2. In chemistry, a 'radical' is a highly reactive, and therefore shortlived, species. + "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for methyl_peroxy_radical is CH3O2. In chemistry, a "radical" is a highly reactive, and therefore short lived, species. 1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, whereX is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for molecular hydrogen is H2. + "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for molecular hydrogen is H2. @@ -9548,14 +10630,14 @@ totals are summed to obtain the index. 1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, whereX is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. In chemistry, a 'radical' is a highly reactive, and therefore shortlived, species. + "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". In chemistry, a "radical" is a highly reactive, and therefore short lived, species. 1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, whereX is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for nitric acid is HNO3. + "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for nitric acid is HNO3. @@ -9569,28 +10651,28 @@ totals are summed to obtain the index. 1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, whereX is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). 
A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for nitrogen dioxide is NO2. + "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for nitrogen dioxide is NO2. 1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, whereX is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for nitrogen monoxide is NO. + "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for nitrogen monoxide is NO. 1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, whereX is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for nitrous acid is HNO2. + "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for nitrous acid is HNO2. 1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, whereX is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for nitrous oxide is N2O. + "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for nitrous oxide is N2O. @@ -9604,21 +10686,28 @@ totals are summed to obtain the index. 1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, whereX is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The term "peroxy_radicals" means all organic and inorganic peroxy radicals. This includes HO2 and all organic peroxyradicals, sometimes referred to as RO2. In chemistry, a 'radical' is a highly reactive, and therefore shortlived, species. + "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. 
It means the ratio of the mass of X to the mass of Y (including X). A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The term "peroxy_radicals" means all organic and inorganic peroxy radicals. This includes HO2 and all organic peroxy radicals, sometimes referred to as RO2. In chemistry, a "radical" is a highly reactive, and therefore short lived, species. 1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, whereX is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. "Noy" describes a family of chemical species. The family usually includes atomic nitrogen (N), nitrogen monoxide (NO), nitrogen dioxide (NO2), dinitrogen pentoxide (N2O5), nitric acid (HNO3), peroxynitric acid (HNO4), bromine nitrate (BrONO2) , chlorine nitrate (ClONO2) and organic nitrates (most notably peroxyacetyl nitrate, sometimes referred to as PAN, (CH3COO2NO2)). The list of individual species that are included in a quantity having a group chemical standard name can vary between models. Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute. The phrase 'expressed_as' is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. + "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. "Noy" describes a family of chemical species. The family usually includes atomic nitrogen (N), nitrogen monoxide (NO), nitrogen dioxide (NO2), dinitrogen pentoxide (N2O5), nitric acid (HNO3), peroxynitric acid (HNO4), bromine nitrate (BrONO2), chlorine nitrate (ClONO2) and organic nitrates (most notably peroxyacetyl nitrate, sometimes referred to as PAN, (CH3COO2NO2)). The list of individual species that are included in a quantity having a group chemical standard name can vary between models. Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute. + + + + 1 + + + "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It is evaluated as the mass of X divided by the mass of Y (including X). It may be expressed as a fraction, a percentage, or any other dimensionless representation of a fraction. 1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, whereX is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X).
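The reworded entry above ("a fraction, a percentage, or any other dimensionless representation of a fraction") is a statement about units: the quantity is dimensionless, so "1" and "percent" are interchangeable representations. A sketch of that round trip using cf_units, the units library Iris is built on; the values are illustrative and this is an assumption-laden aside, not part of the table:

```python
from cf_units import Unit

dimensionless = Unit("1")
percent = Unit("percent")

# A mass fraction of 0.25 expressed as a percentage, and converted back.
print(dimensionless.convert(0.25, percent))  # 25.0
print(percent.convert(25.0, dimensionless))  # 0.25
```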
A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. "Oxygenated" means containing oxygen. "Hydrocarbon" means a compound containing hydrogen and carbon. + "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". "Oxygenated" means containing oxygen. "Hydrocarbon" means a compound containing hydrogen and carbon. @@ -9646,21 +10735,21 @@ totals are summed to obtain the index. 1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, whereX is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for peroxyacetyl nitrate, sometimes referred to as PAN, is CH3COO2NO2. The IUPAC name for peroxyacetyl_nitrate is nitroethaneperoxoate. + "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for peroxyacetyl nitrate, sometimes referred to as PAN, is CH3COO2NO2. The IUPAC name for peroxyacetyl nitrate is nitroethaneperoxoate. 1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, whereX is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for peroxynitric acid, sometimes referred to as PNA, is HO2NO2. + "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for peroxynitric acid, sometimes referred to as PNA, is HO2NO2. 1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, whereX is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The term "peroxy_radicals" means all organic and inorganic peroxy radicals. This includes HO2 and all organic peroxyradicals, sometimes referred to as RO2. In chemistry, a 'radical' is a highly reactive, and therefore shortlived, species. + "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The term "peroxy_radicals" means all organic and inorganic peroxy radicals. This includes HO2 and all organic peroxy radicals, sometimes referred to as RO2. 
In chemistry, a "radical" is a highly reactive, and therefore short lived, species. @@ -9863,35 +10952,35 @@ totals are summed to obtain the index. 1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, whereX is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for propane isC3H8. Propane is a member of the group of hydrocarbons known as alkanes. There are standard names for the alkane group as well as for some of the individual species. + "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for propane is C3H8. Propane is a member of the group of hydrocarbons known as alkanes. There are standard names for the alkane group as well as for some of the individual species. 1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, whereX is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for propene isC3H6. Propene is a member of the group of hydrocarbons known as alkenes. There are standard names for the alkene group as well as for some of the individual species. + "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for propene is C3H6. Propene is a member of the group of hydrocarbons known as alkenes. There are standard names for the alkene group as well as for some of the individual species. 1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, whereX is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical symbol for radon is Rn. + "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical symbol for radon is Rn. 1 - The quantity with standard name mass_fraction_of_rainfall_falling_onto_surface_snow is the mass of rainfall falling onto snow as a fraction of the mass of rainfall falling within the area of interest. The phrase "surface_snow" means snow lying on the surface. Unless indicated in the cell_methods attribute, a quantity is assumed to apply to the whole area of each horizontal grid box. + The quantity with standard name mass_fraction_of_rainfall_falling_onto_surface_snow is the mass of rainfall falling onto snow as a fraction of the mass of rainfall falling within the area of interest. 
Surface snow refers to the snow on the solid ground or on surface ice cover, but excludes, for example, falling snowflakes and snow on plants. The surface called "surface" means the lower boundary of the atmosphere. Unless indicated in the cell_methods attribute, a quantity is assumed to apply to the whole area of each horizontal grid box. - + 1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). + "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It is evaluated as the mass of X divided by the mass of Y (including X). It may be expressed as a fraction, a percentage, or any other dimensionless representation of a fraction. Grain-size class distribution is based on the Udden-Wentworth scale. @@ -9915,18 +11004,32 @@ totals are summed to obtain the index. Mass fraction is used in the construction mass_fraction_of_X_in_Y, where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. Aerosol particles take up ambient water (a process known as hygroscopic growth) depending on the relative humidity and the composition of the particles. "Dry aerosol particles" means aerosol particles without any water uptake. "Secondary particulate organic matter" means particulate organic matter formed within the atmosphere from gaseous precursors. The sum of primary_particulate_organic_matter_dry_aerosol and secondary_particulate_organic_matter_dry_aerosol is particulate_organic_matter_dry_aerosol. + + 1 + + + "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". Shallow convective cloud is nonprecipitating cumulus cloud with a cloud top below 3000m above the surface produced by the convection schemes in an atmosphere model. Some atmosphere models differentiate between shallow and deep convection. "Cloud liquid water" refers to the liquid phase of cloud water. A diameter of 0.2 mm has been suggested as an upper limit to the size of drops that shall be regarded as cloud drops; larger drops fall rapidly enough so that only very strong updrafts can sustain them. Any such division is somewhat arbitrary, and active cumulus clouds sometimes contain cloud drops much larger than this. Reference: AMS Glossary http://glossary.ametsoc.org/wiki/Cloud_drop. + + + + 1 + + + "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It is evaluated as the mass of X divided by the mass of Y (including X). It may be expressed as a fraction, a percentage, or any other dimensionless representation of a fraction. Grain-size class distribution is based on the Udden-Wentworth scale. + + 1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). + Mass fraction is used in the construction mass_fraction_of_X_in_Y, where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X).
"Snow" refers to the precipitating part of snow in the atmosphere – the cloud snow content is excluded. 1 - Solid precipitation refers to the precipitation of water in the solid phase. Water in the atmosphere exists in one of three phases: solid, liquid or vapor. The solid phase can exist as snow, hail, graupel, cloud ice, or as a component of aerosol. The quantity with standard name mass_fraction_of_solid_precipitation_falling_onto_surface_snow is the mass of solid precipitation falling onto snow as a fraction of the mass of solid precipitation falling within the area of interest. The phrase "surface_snow" means snow lying on the surface. Unless indicated in the cell_methods attribute, a quantity is assumed to apply to the whole area of each horizontal grid box. + The quantity with standard name mass_fraction_of_solid_precipitation_falling_onto_surface_snow is the mass of solid precipitation falling onto snow as a fraction of the mass of solid precipitation falling within the area of interest. Solid precipitation refers to the precipitation of water in the solid phase. Water in the atmosphere exists in one of three phases: solid, liquid or vapor. The solid phase can exist as snow, hail, graupel, cloud ice, or as a component of aerosol. Surface snow refers to the snow on the solid ground or on surface ice cover, but excludes, for example, falling snowflakes and snow on plants. Unless indicated in the cell_methods attribute, a quantity is assumed to apply to the whole area of each horizontal grid box. @@ -9940,7 +11043,7 @@ totals are summed to obtain the index. 1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). In an atmosphere model, stratiform cloud is that produced by large-scale convergence (not the convection schemes). + "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". In an atmosphere model, stratiform cloud is that produced by large-scale convergence (not the convection schemes). "Cloud liquid water" refers to the liquid phase of cloud water. A diameter of 0.2 mm has been suggested as an upper limit to the size of drops that shall be regarded as cloud drops; larger drops fall rapidly enough so that only very strong updrafts can sustain them. Any such division is somewhat arbitrary, and active cumulus clouds sometimes contain cloud drops much larger than this. Reference: AMS Glossary http://glossary.ametsoc.org/wiki/Cloud_drop. @@ -9961,21 +11064,21 @@ totals are summed to obtain the index. 1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, whereX is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. + "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for sulfuric acid is H2SO4. 
1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, whereX is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. Terpenes are hydrocarbons, that is,they contain only hydrogen and carbon combined in the general proportions (C5H8)n where n is an integer greater than on equal to one. The term "terpenes" is used in standard names to describe the group of chemical species having this common structure that are represented within a given model. The list of individual species that are included in a quantity having a group chemical standard name can vary between models. Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute. Standard names exist for some individual terpene species, e.g., isoprene and limonene. + "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". Terpenes are hydrocarbons, that is, they contain only hydrogen and carbon combined in the general proportions (C5H8)n where n is an integer greater than or equal to one. The term "terpenes" is used in standard names to describe the group of chemical species having this common structure that are represented within a given model. The list of individual species that are included in a quantity having a group chemical standard name can vary between models. Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute. Standard names exist for some individual terpene species, e.g., isoprene and limonene. 1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, whereX is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for toluene isC6H5CH3. Toluene has the same structure as benzene, except that one of the hydrogen atomsis replaced by a methyl group. The systematic name for toluene is methylbenzene. + "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for toluene is C6H5CH3. Toluene has the same structure as benzene, except that one of the hydrogen atoms is replaced by a methyl group. The IUPAC name for toluene is methylbenzene. @@ -10017,7 +11120,14 @@ totals are summed to obtain the index. 1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, whereX is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for xylene is C6H4C2H6.
In chemistry, xylene is a generic term for a group of three isomers of dimethylbenzene. The IUPAC names for the isomers are 1,2-dimethylbenzene, 1,3-dimethylbenzene and 1,4-dimethylbenzene. Xylene is an aromatic hydrocarbon. There are standard names that refer toaromatic_compounds as a group, as well as those for individual species. + "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for xylene is C6H4C2H6. In chemistry, xylene is a generic term for a group of three isomers of dimethylbenzene. The IUPAC names for the isomers are 1,2-dimethylbenzene, 1,3-dimethylbenzene and 1,4-dimethylbenzene. Xylene is an aromatic hydrocarbon. There are standard names that refer to aromatic compounds as a group, as well as those for individual species. + + + + 1 + + + The quantity with standard name mass_ratio_of_moisture_to_dry_soil is also known as the water content of a soil or the wet-basis gravimetric moisture content. It is the ratio of the mass of water (liquid and solid) to the mass of the dried sample. The phrase "ratio_of_X_to_Y" means X/Y. It may be expressed as a fraction, a percentage, or any other dimensionless representation of a fraction. @@ -10045,7 +11155,7 @@ totals are summed to obtain the index. 1 - Middle type clouds are: Altostratus, Altocumulus, Nimbostratus. "X_area_fraction" means the fraction of horizontal area occupied by X. Cloud area fraction is also called "cloud amount" and "cloud cover". X_type_cloud_area_fraction is generally determined on the basis of cloud type, though Numerical Weather Prediction (NWP) models often calculate them based on the vertical location of the cloud. + "Area fraction" is the fraction of a grid cell's horizontal area that has some characteristic of interest. It is evaluated as the area of interest divided by the grid cell area. It may be expressed as a fraction, a percentage, or any other dimensionless representation of a fraction. Middle type clouds are: Altostratus, Altocumulus, Nimbostratus. X_type_cloud_area_fraction is generally determined on the basis of cloud type, though Numerical Weather Prediction (NWP) models often calculate them based on the vertical location of the cloud. For the cloud area fraction between specified levels in the atmosphere, standard names including "cloud_area_fraction_in_atmosphere_layer" are used. Standard names referring only to "cloud_area_fraction" should be used for quantities for the whole atmosphere column. Cloud area fraction is also called "cloud amount" and "cloud cover". @@ -10062,6 +11172,20 @@ totals are summed to obtain the index. Depth is the vertical distance below the surface. 'Undersaturation' means that a solution is unsaturated with respect to a solute. Calcite is a mineral that is a polymorph of calcium carbonate. The chemical formula of calcite is CaCO3. Standard names also exist for aragonite, another polymorph of calcium carbonate. The "minimum depth of undersaturation", sometimes called the "saturation horizon", is the shallowest depth at which a body of water is an undersaturated solution of a named solute. + + 1 + + + The phrase "ratio_of_X_to_Y" means X/Y. It may be expressed as a fraction, a percentage, or any other dimensionless representation of a fraction. 
It is the lower limit of the water content at which a 3 mm diameter cylindrical soil sample will break in 3 to 10 mm pieces. It is the lower limit of the plastic state, which has the liquid limit as the upper bound. Known as the plastic limit. + + + + 1 + + + The phrase "ratio_of_X_to_Y" means X/Y. It may be expressed as a fraction, a percentage, or any other dimensionless representation of a fraction. It is the lower limit of the water content at which a soil sample will flow in a viscous manner. Known as the liquid limit. + + W m-2 @@ -10328,6 +11452,13 @@ totals are summed to obtain the index. Model level number should be understood as equivalent to layer number. + + 1 + + + The modified Fosberg Fire Weather Index (mFFWI) is a measure of the potential effect of weather conditions on wildland fire. The Fosberg Fire Weather Index is a function of temperature, wind, and humidity. It is modified with a fuel availability factor based on the Keetch Byram Drought Index. + + kg m-2 @@ -10349,6 +11480,13 @@ totals are summed to obtain the index. Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction mole_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for aceto-nitrile is CH3CN. The IUPAC name for aceto-nitrile is ethanenitrile. + + mol m-3 + + + "Mole concentration" means number of moles per unit volume, also called "molarity", and is used in the construction "mole_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The equivalent term in the NERC P01 Parameter Usage Vocabulary may be found at http://vocab.nerc.ac.uk/collection/P01/current/ATPXZZDZ/2/. + + mol m-3 @@ -10381,7 +11519,7 @@ totals are summed to obtain the index. mol m-3 - Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction mole_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituentsof A. "nmvoc" means non methane volatile organic compounds; "nmvoc" is the term used in standard names to describe the group of chemical species having this classification that are represented within a given model. The list of individual species that are included in a quantity having a group chemical standard name can vary between models. Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute. "Anthropogenic" means influenced,caused, or created by human activity. + "Mole concentration" means number of moles per unit volume, also called "molarity", and is used in the construction "mole_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". 
The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituentsof A. The abbreviation "nmvoc" means non methane volatile organic compounds; "nmvoc" is the term used in standard names to describe the group of chemical species having this classification that are represented within a given model. The list of individual species that are included in a quantity having a group chemical standard name can vary between models. Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute. "Anthropogenic" means influenced, caused, or created by human activity. @@ -10395,7 +11533,7 @@ totals are summed to obtain the index. mol m-3 - 'Mole concentration' means number of moles per unit volume, also called"molarity", and is used in the construction mole_concentration_of_X_in_Y, whereX is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. Mole concentration at saturation means the mole concentration in a saturated solution.The phrase 'expressed_as' is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. Aragonite is a mineral that is a polymorph of calcium carbonate. The chemical formula of aragonite is CaCO3. Standard names also exist for calcite, another polymorph of calcium carbonate. + Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction mole_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". "Mole concentration at saturation" means the mole concentration in a saturated solution. The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. Aragonite is a mineral that is a polymorph of calcium carbonate. The chemical formula of aragonite is CaCO3. Standard names also exist for calcite, another polymorph of calcium carbonate. @@ -10447,6 +11585,20 @@ totals are summed to obtain the index. Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction mole_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. 
"nmvoc" means non methane volatile organic compounds; "nmvoc" is the term used in standard names to describe the group of chemical species having this classification that are represented within a given model. The list of individual species that are included in a quantity having a group chemical standard name can vary between models. Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute. "Biogenic" means influenced, caused, or created by natural processes. + + mol m-3 + + + "Mole concentration" means number of moles per unit volume, also called "molarity", and is used in the construction "mole_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The phrase "expressed_as" is used in the construction "A_expressed_as_B", where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. "Biological taxon" is a name or other label identifying an organism or a group of organisms as belonging to a unit of classification in a hierarchical taxonomy. There must be an auxiliary coordinate variable with standard name biological_taxon_name to identify the taxon in human readable format and optionally an auxiliary coordinate variable with standard name biological_taxon_lsid to provide a machine-readable identifier. See Section 6.1.2 of the CF convention (version 1.8 or later) for information about biological taxon auxiliary coordinate variables. + + + + mol m-3 + + + "Mole concentration" means number of moles per unit volume, also called "molarity", and is used in the construction "mole_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The phrase "expressed_as" is used in the construction "A_expressed_as_B", where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. "Biological taxon" is a name or other label identifying an organism or a group of organisms as belonging to a unit of classification in a hierarchical taxonomy. There must be an auxiliary coordinate variable with standard name biological_taxon_name to identify the taxon in human readable format and optionally an auxiliary coordinate variable with standard name biological_taxon_lsid to provide a machine-readable identifier. See Section 6.1.2 of the CF convention (version 1.8 or later) for information about biological taxon auxiliary coordinate variables. + + mol m-3 @@ -10472,7 +11624,7 @@ totals are summed to obtain the index. mol m-3 - Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction mole_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. "Brox" describes a family of chemical species consisting of inorganic bromine compounds with theexception of hydrogen bromide (HBr) and bromine nitrate (BrONO2). 
"Brox" is theterm used in standard names for all species belonging to the family that are represented within a given model. The list of individual species that are included in a quantity with a group chemical standard name can vary between models. Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute. "Inorganic bromine", sometimes referred to as Bry, describes a family of chemical species which result from the degradation of source gases containing bromine (halons, methyl bromide, VSLS) and natural inorganic bromine sources such as volcanoes, seasalt and other aerosols. Standard names that use the term "inorganic_bromine" are used for quantities that contain all inorganic bromine species including HCland ClONO2. The phrase 'expressed_as' is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard nameis calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. + "Mole concentration" means number of moles per unit volume, also called "molarity", and is used in the construction "mole_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. "Brox" describes a family of chemical species consisting of inorganic bromine compounds with the exception of hydrogen bromide (HBr) and bromine nitrate (BrONO2). "Brox" is the term used in standard names for all species belonging to the family that are represented within a given model. The list of individual species that are included in a quantity with a group chemical standard name can vary between models. Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute. "Inorganic bromine", sometimes referred to as Bry, describes a family of chemical species which result from the degradation of source gases containing bromine (halons, methyl bromide, VSLS) and natural inorganic bromine sources such as volcanoes, sea salt and other aerosols. Standard names that use the term "inorganic_bromine" are used for quantities that contain all inorganic bromine species including HCl and ClONO2. @@ -10486,7 +11638,7 @@ totals are summed to obtain the index. mol m-3 - Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction mole_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'.The phrase 'expressed_as' is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. 'Calcareous phytoplankton' are phytoplankton that produce calcite. 
Phytoplankton are autotrophic prokaryotic or eukaryotic algae that live near the water surface where there is sufficient light to support photosynthesis. Calcite is a mineral that is a polymorphof calcium carbonate. The chemical formula of calcite is CaCO3. Standard names also existfor aragonite, another polymorph of calcium carbonate. + Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction mole_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. "Calcareous phytoplankton" are phytoplankton that produce calcite. Phytoplankton are algae that grow where there is sufficient light to support photosynthesis. Calcite is a mineral that is a polymorph of calcium carbonate. The chemical formula of calcite is CaCO3. Standard names also exist for aragonite, another polymorph of calcium carbonate. @@ -10500,7 +11652,7 @@ totals are summed to obtain the index. mol m-3 - 'Mole concentration' means number of moles per unit volume, also called"molarity", and is used in the construction mole_concentration_of_X_in_Y, whereX is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. Mole concentration at saturation means the mole concentration in a saturated solution.The phrase 'expressed_as' is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. Calcite is a mineral that is a polymorph of calcium carbonate. The chemical formula of calcite is CaCO3. Standard names also exist for aragonite, another polymorph of calcium carbonate. + Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction mole_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". "Mole concentration at saturation" means the mole concentration in a saturated solution. The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. Calcite is a mineral that is a polymorph of calcium carbonate. The chemical formula of calcite is CaCO3. Standard names also exist for aragonite, another polymorph of calcium carbonate. @@ -10528,7 +11680,7 @@ totals are summed to obtain the index. mol m-3 - 'Mole concentration' means number of moles per unit volume, also called"molarity", and is used in the construction mole_concentration_of_X_in_Y, whereX is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'.
The phrase 'expressed_as' is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. The chemical formula of the carbonate anion is CO3 with a charge of minus two. + Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction mole_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. The chemical formula of the carbonate anion is CO3 with a charge of minus two. @@ -10556,63 +11708,63 @@ totals are summed to obtain the index. mol m-3 - Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction mole_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula of carbon tetrachloride is CCl4. + "Mole concentration" means number of moles per unit volume, also called "molarity", and is used in the construction "mole_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula of carbon tetrachloride is CCl4. The IUPAC name for carbon tetrachloride is tetrachloromethane. mol m-3 - Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction mole_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula of CFC113a CCl3CF3. The IUPAC name for CFC113a is 1,1,1-trichloro-2,2,2-trifluoro-ethane. + "Mole concentration" means number of moles per unit volume, also called "molarity", and is used in the construction "mole_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula of CFC113a is CCl3CF3. The IUPAC name for CFC113a is 1,1,1-trichloro-2,2,2-trifluoroethane. mol m-3 - Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction mole_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula of CFC113 is CCl2FCClF2. The IUPAC name for CFC113 is 1,1,2-trichloro-1,2,2-trifluoro-ethane. + "Mole concentration" means number of moles per unit volume, also called "molarity", and is used in the construction "mole_concentration_of_X_in_Y", where X is a material constituent of Y. 
A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula of CFC113 is CCl2FCClF2. The IUPAC name for CFC113 is 1,1,2-trichloro-1,2,2-trifluoroethane. mol m-3 - Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction mole_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula of CFC114 is CClF2CClF2. The IUPAC name for CFC114 is 1,2-dichloro-1,1,2,2-tetrafluoro-ethane. + "Mole concentration" means number of moles per unit volume, also called "molarity", and is used in the construction "mole_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula of CFC114 is CClF2CClF2. The IUPAC name for CFC114 is 1,2-dichloro-1,1,2,2-tetrafluoroethane. mol m-3 - Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction mole_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula of CFC115 is CClF2CF3. The IUPAC name for CFC115 is 1-chloro-1,1,2,2,2-pentafluoro-ethane. + "Mole concentration" means number of moles per unit volume, also called "molarity", and is used in the construction "mole_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula of CFC115 is CClF2CF3. The IUPAC name for CFC115 is 1-chloro-1,1,2,2,2-pentafluoroethane. mol m-3 - Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction mole_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula of CFC11 is CFCl3. The IUPAC name fof CFC11 is trichloro-fluoro-methane. + "Mole concentration" means number of moles per unit volume, also called "molarity", and is used in the construction "mole_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula of CFC11 is CFCl3. The IUPAC name for CFC11 is trichloro(fluoro)methane. mol m-3 - Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction "mole_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula of CFC11 is CFCl3. The IUPAC name fof CFC11 is trichloro-fluoro-methane. + "Mole concentration" means number of moles per unit volume, also called "molarity", and is used in the construction "mole_concentration_of_X_in_Y", where X is a material constituent of Y. 
A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula of CFC11 is CFCl3. The IUPAC name for CFC11 is trichloro(fluoro)methane. mol m-3 - Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction mole_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for CFC12 is CF2Cl2. The IUPAC name for CFC12 is dichloro-difluoro-methane. + "Mole concentration" means number of moles per unit volume, also called "molarity", and is used in the construction "mole_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for CFC12 is CF2Cl2. The IUPAC name for CFC12 is dichloro(difluoro)methane. mol m-3 - Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction "mole_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for CFC12 is CF2Cl2. The IUPAC name for CFC12 is dichloro-difluoro-methane. + "Mole concentration" means number of moles per unit volume, also called "molarity", and is used in the construction "mole_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for CFC12 is CF2Cl2. The IUPAC name for CFC12 is dichloro(difluoro)methane. @@ -10640,7 +11792,7 @@ totals are summed to obtain the index. mol m-3 - Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction mole_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. "Clox"describes a family of chemical species consisting of inorganic chlorine compounds with theexception of hydrogen chloride (HCl) and chlorine nitrate (ClONO2). "Clox" is the term used in standard names for all species belonging to the family that are representedwithin a given model. The list of individual species that are included in a quantity with a group chemical standard name can vary between models. Where possible, the data variable should be accompanied by a complete description of the species represented, for example, byusing a comment attribute. "Inorganic chlorine", sometimes referred to as Cly, describes a family of chemical species which result from the degradation of source gases containing chlorine (CFCs, HCFCs, VSLS) and natural inorganic chlorine sources such as seasaltand other aerosols. Standard names that use the term "inorganic_chlorine" are used for quantities that contain all inorganic chlorine species including HCl and ClONO2. The phrase 'expressed_as' is used in the construction A_expressed_as_B, where B is a chemical constituent of A. 
It means that the quantity indicated by the standard name is calculatedsolely with respect to the B contained in A, neglecting all other chemical constituents ofA. + Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction "mole_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The phrase "expressed_as" is used in the construction "A_expressed_as_B", where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. "Clox" describes a family of chemical species consisting of inorganic chlorine compounds with the exception of hydrogen chloride (HCl) and chlorine nitrate (ClONO2). "Clox" is the term used in standard names for all species belonging to the family that are represented within a given model. The list of individual species that are included in a quantity with a group chemical standard name can vary between models. Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute. "Inorganic chlorine", sometimes referred to as "Cly", describes a family of chemical species which result from the degradation of source gases containing chlorine (CFCs, HCFCs, VSLS) and natural inorganic chlorine sources such as sea salt and other aerosols. Standard names that use the term "inorganic_chlorine" are used for quantities that contain all inorganic chlorine species including HCl and ClONO2. @@ -10654,14 +11806,14 @@ totals are summed to obtain the index. mol m-3 - Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction mole_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The phrase 'expressed_as' is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated in terms of B alone, neglecting all other chemical constituents of A. + Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction mole_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated in terms of B alone, neglecting all other chemical constituents of A. Diatoms are phytoplankton with an external skeleton made of silica. Phytoplankton are algae that grow where there is sufficient light to support photosynthesis. - + mol m-3 - Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction mole_concentration_of_X_in_Y, where X is a material constituent of Y. 
A chemical or biological species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'.The phrase 'expressed_as' is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. In ocean modelling, diazotrophs are phytoplankton of the phylum cyanobacteria distinct from other phytoplankton groups in their ability to fix nitrogen gas in addition to nitrate and ammonium. Phytoplankton are autotrophic prokaryotic or eukaryotic organisms that live near the water surface where there is sufficient light to support photosynthesis. + "Mole concentration" means number of moles per unit volume, also called "molarity", and is used in the construction "mole_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The phrase "expressed_as" is used in the construction "A_expressed_as_B", where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. Phytoplankton are algae that grow where there is sufficient light to support photosynthesis. Diazotrophic phytoplankton are phytoplankton (predominantly from Phylum Cyanobacteria) that are able to fix molecular nitrogen (gas or solute) in addition to nitrate and ammonium. @@ -10682,7 +11834,7 @@ totals are summed to obtain the index. mol m-3 - 'Mole concentration' means number of moles per unit volume, also called"molarity", and is used in the construction mole_concentration_of_X_in_Y, whereX is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for dimethyl sulfide is (CH3)2S. Dimethyl sulfide is sometimes referred to as DMS. + Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction mole_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for dimethyl sulfide is (CH3)2S. Dimethyl sulfide is sometimes referred to as DMS. @@ -10713,11 +11865,18 @@ totals are summed to obtain the index. Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction "mole_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". In ocean biogeochemistry models, an "abiotic analogue" is used to simulate the effect on a modelled variable when biological effects on ocean carbon concentration and alkalinity are ignored. "Dissolved inorganic carbon" describes a family of chemical species in solution, including carbon dioxide, carbonic acid and the carbonate and bicarbonate anions. "Dissolved inorganic carbon" is the term used in standard names for all species belonging to the family that are represented within a given model. 
The list of individual species that are included in a quantity having a group chemical standard name can vary between models. Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute. + + mol m-3 + + + "Mole concentration" means number of moles per unit volume, also called "molarity", and is used in the construction "mole_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". "Dissolved inorganic carbon" describes a family of chemical species in solution, including carbon dioxide, carbonic acid and the carbonate and bicarbonate anions. "Dissolved inorganic carbon" is the term used in standard names for all species belonging to the family that are represented within a given model. The list of individual species that are included in a quantity having a group chemical standard name can vary between models. Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute. "Sea floor sediment" is sediment deposited at the sea bed. "Water" means water in all phases. + + mol m-3 - Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction mole_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. "Dissolved inorganic carbon" describes a family of chemical species in solution, including carbon dioxide, carbonic acid and the carbonate and bicarbonate anions. "Dissolved inorganic carbon" is the term used in standard names for all species belonging to the family that are represented within a given model. The list of individual species that are included in a quantity having a group chemical standard name can vary between models. Where possible, the data variable should be accompanied by a complete description of the speciesrepresented, for example, by using a comment attribute. + "Mole concentration" means number of moles per unit volume, also called "molarity", and is used in the construction "mole_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". "Dissolved inorganic carbon" describes a family of chemical species in solution, including carbon dioxide, carbonic acid and the carbonate and bicarbonate anions. "Dissolved inorganic carbon" is the term used in standard names for all species belonging to the family that are represented within a given model. The list of individual species that are included in a quantity having a group chemical standard name can vary between models. Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute. @@ -10727,6 +11886,13 @@ totals are summed to obtain the index. Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction "mole_concentration_of_X_in_Y", where X is a material constituent of Y. 
A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". In ocean biogeochemistry models, a "natural analogue" is used to simulate the effect on a modelled variable of imposing preindustrial atmospheric carbon dioxide concentrations, even when the model as a whole may be subjected to varying forcings. "Dissolved inorganic carbon" describes a family of chemical species in solution, including carbon dioxide, carbonic acid and the carbonate and bicarbonate anions. "Dissolved inorganic carbon" is the term used in standard names for all species belonging to the family that are represented within a given model. The list of individual species that are included in a quantity having a group chemical standard name can vary between models. Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute. + + mol m-3 + + + Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction "mole_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". "Inorganic nitrogen" describes a family of chemical species which, in an ocean model, usually includes nitrite, nitrate and ammonium which act as nitrogen nutrients. "Inorganic nitrogen" is the term used in standard names for all species belonging to the family that are represented within a given model. The list of individual species that are included in a quantity having a group chemical standard name can vary between models. Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute. + + mol m-3 @@ -10745,14 +11911,21 @@ totals are summed to obtain the index. mol m-3 - 'Mole concentration' means number of moles per unit volume, also called"molarity", and is used in the construction mole_concentration_of_X_in_Y, whereX is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. + Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction mole_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". + + + + mol m-3 + + + "Mole concentration" means number of moles per unit volume, also called "molarity", and is used in the construction "mole_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for molecular nitrogen is N2. mol m-3 - 'Mole concentration' means number of moles per unit volume, also called"molarity", and is used in the construction mole_concentration_of_X_in_Y, whereX is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. 
+ Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction mole_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for molecular oxygen is O2. @@ -10766,14 +11939,56 @@ totals are summed to obtain the index. mol m-3 - 'Mole concentration' means number of moles per unit volume, also called "molarity", and is used in the construction mole_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The concentration of any chemical species, whether particulate or dissolved, may vary with depth in the ocean. A depth profile may go through one or more local minima in concentration. The mole_concentration_of_molecular_oxygen_in_sea_water_at_shallowest_local_minimum_in_vertical_profile is the mole concentration of oxygen at the local minimum in the concentration profile that occurs closest to the sea surface. + Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction mole_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The concentration of any chemical species, whether particulate or dissolved, may vary with depth in the ocean. A depth profile may go through one or more local minima in concentration. The mole_concentration_of_molecular_oxygen_in_sea_water_at_shallowest_local_minimum_in_vertical_profile is the mole concentration of oxygen at the local minimum in the concentration profile that occurs closest to the sea surface. The chemical formula for molecular oxygen is O2. + + + + mol m-3 + + + "Mole concentration" means number of moles per unit volume, also called "molarity", and is used in the construction "mole_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". "Dissolved nitrogen" means the sum of all nitrogen in solution: inorganic nitrogen (nitrite, nitrate and ammonium) plus nitrogen in carbon compounds. + + + + mol m-3 + + + The sum of dissolved organic carbon-13 component concentrations. "Mole concentration" means number of moles per unit volume, also called "molarity", and is used in the construction "mole_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". "Organic carbon" describes a family of chemical species and is the term used in standard names for all species belonging to the family that are represented within a given model. The list of individual species that are included in a quantity having a group chemical standard name can vary between models. Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute. "C" means the element carbon and "13C" is the stable isotope "carbon-13", having six protons and seven neutrons. 
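The entries above all follow the same pattern: a name built with the mole_concentration_of_X_in_Y construction, paired with the canonical unit mol m-3. As a hedged sketch of how such an entry is consumed by this repository's own library, a cube can carry one of these names together with that unit; the data values below are invented, and the example assumes an Iris version whose standard-name table already contains the name used.

```python
import numpy as np

import iris.cube

# Invented data values; the standard name and its canonical unit
# (mol m-3) come from the table this diff updates.
cube = iris.cube.Cube(
    np.array([2.1e-3, 2.0e-3, 1.9e-3]),
    standard_name=(
        "mole_concentration_of_dissolved_inorganic_carbon_in_sea_water"
    ),
    units="mol m-3",
)
print(cube.summary(shorten=True))
```

Iris validates `standard_name` against its copy of this table, so table additions like the ones in this diff are what make the new names assignable at all.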
+ + + + mol m-3 + + + "Mole concentration" means number of moles per unit volume, also called "molarity", and is used in the construction "mole_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". "Organic carbon" describes a family of chemical species and is the term used in standard names for all species belonging to the family that are represented within a given model. The list of individual species that are included in a quantity having a group chemical standard name can vary between models. Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute. "Sea floor sediment" is sediment deposited at the sea bed. "Water" means water in all phases. mol m-3 - Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction mole_concentration_of_X_in_Y, where Xis a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. Organic carbon describes a family of chemical species and is the term used in standard names for all species belonging to the family that are represented within a given model. The list of individual species that are included in a quantity having a group chemical standard name can vary between models. Where possible, the data variable should be accompanied by a completedescription of the species represented, for example, by using a comment attribute. + "Mole concentration" means number of moles per unit volume, also called "molarity", and is used in the construction "mole_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". "Organic carbon" describes a family of chemical species and is the term used in standard names for all species belonging to the family that are represented within a given model. The list of individual species that are included in a quantity having a group chemical standard name can vary between models. Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute. + + + + mol m-3 + + + "Mole concentration" means number of moles per unit volume, also called "molarity", and is used in the construction "mole_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". "Dissolved organic nitrogen" describes the nitrogen held in carbon compounds in solution. These are mostly generated by plankton excretion and decay. + + + + mol m-3 + + + "Mole concentration" means number of moles per unit volume, also called "molarity", and is used in the construction "mole_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". "Organic phosphorus" means phosphorus in carbon compounds. The equivalent term in the NERC P01 Parameter Usage Vocabulary may be found at http://vocab.nerc.ac.uk/collection/P01/current/ORGPDSZZ/4/.
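Several descriptions above repeat that a variable with a group chemical name ("nmvoc", "noy", "organic carbon" and so on) should, where possible, carry a complete description of the species represented, for example by using a comment attribute. A minimal sketch of doing so in Iris, with an invented species list:

```python
import numpy as np

import iris.cube

# The species list below is invented; the point is only that a group
# chemical name is accompanied by a comment naming the species it
# covers in this particular model.
cube = iris.cube.Cube(np.zeros(3), units="mol m-3")
cube.attributes["comment"] = (
    "nmvoc represented in this model: ethane, propane, isoprene"
)
```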
+ + + + mol m-3 + + + "Mole concentration" means number of moles per unit volume, also called "molarity", and is used in the construction "mole_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". Phosphorus means phosphorus in all chemical forms, commonly referred to as "total phosphorus". The equivalent term in the NERC P01 Parameter Usage Vocabulary may be found at http://vocab.nerc.ac.uk/collection/P01/current/TPHSDSZZ/6/. @@ -10801,7 +12016,7 @@ totals are summed to obtain the index. mol m-3 - Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction mole_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for ethyne is HC2H. Ethyne is the IUPAC name for this species, which is also commonlyknown as acetylene. + "Mole concentration" means number of moles per unit volume, also called "molarity", and is used in the construction "mole_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for ethyne is HC2H. Ethyne is the IUPAC name for this species, which is also commonly known as acetylene. @@ -10832,39 +12047,46 @@ totals are summed to obtain the index. Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction mole_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical symbol for mercury is Hg. + + mol m-3 + + + "Mole concentration" means number of moles per unit volume, also called "molarity", and is used in the construction "mole_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula of guanosine triphosphate is C10H16N5O14P3. + + mol m-3 - Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction mole_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for halon1202 is CBr2F2. The IUPAC name for halon1202 is dibromo-difluoro-methane. + "Mole concentration" means number of moles per unit volume, also called "molarity", and is used in the construction "mole_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for Halon1202 is CBr2F2. The IUPAC name for Halon1202 is dibromo(difluoro)methane. mol m-3 - Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction mole_concentration_of_X_in_Y, where X is a material constituent of Y. 
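The biological-taxon entries earlier in this section state that the data variable must be accompanied by an auxiliary coordinate variable with standard name biological_taxon_name, and optionally one with standard name biological_taxon_lsid. A hedged sketch of that layout in Iris follows; the taxon, LSID and data value are purely illustrative, and the example assumes an Iris version whose standard-name table includes the CF 1.8 taxon names.

```python
import numpy as np

import iris.coords
import iris.cube

# Illustrative taxon and LSID; the two auxiliary coordinates identify
# the organism the concentration refers to, per CF 1.8 Section 6.1.2.
cube = iris.cube.Cube(np.array([1.2e-6]), units="mol m-3")
cube.add_aux_coord(
    iris.coords.AuxCoord(
        ["Calanus finmarchicus"], standard_name="biological_taxon_name"
    ),
    0,
)
cube.add_aux_coord(
    iris.coords.AuxCoord(
        ["urn:lsid:marinespecies.org:taxname:104464"],
        standard_name="biological_taxon_lsid",
    ),
    0,
)
```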
A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for halon1211 is CBrClF2. The IUPAC name for halon1211 is bromo-chloro-difluoro-methane. + "Mole concentration" means number of moles per unit volume, also called "molarity", and is used in the construction "mole_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for Halon1211 is CBrClF2. The IUPAC name for Halon1211 is bromo-chloro-difluoromethane. mol m-3 - Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction mole_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for halon1301 is CBrF3. The IUPAC name for halon1301 is bromo-trifluoro-methane. + "Mole concentration" means number of moles per unit volume, also called "molarity", and is used in the construction "mole_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for Halon1301 is CBrF3. The IUPAC name for Halon1301 is bromo(trifluoro)methane. mol m-3 - Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction mole_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for halo2402 is C2Br2F4. The IUPAC name for halon2402 is 1,2-dibromo-1,1,2,2-tetrafluoro-ethane. + "Mole concentration" means number of moles per unit volume, also called "molarity", and is used in the construction "mole_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for Halon2402 is C2Br2F4. The IUPAC name for Halon2402 is 1,2-dibromo-1,1,2,2-tetrafluoroethane. mol m-3 - Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction mole_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for hcc140a is CH3CCl3. The IUPAC name for hcc140a is 1,1,1-trichloro-ethane. + "Mole concentration" means number of moles per unit volume, also called "molarity", and is used in the construction "mole_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for HCC140a, also called methyl chloroform, is CH3CCl3. The IUPAC name for HCC140a is 1,1,1-trichloroethane. @@ -10885,7 +12107,7 @@ totals are summed to obtain the index. 
mol m-3 - Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction mole_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for HCFC22 is CHClF2. The IUPAC name for HCFC22 is chloro-difluoro-methane. + "Mole concentration" means number of moles per unit volume, also called "molarity", and is used in the construction "mole_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for HCFC22 is CHClF2. The IUPAC name for HCFC22 is chloro(difluoro)methane. @@ -10930,18 +12152,25 @@ totals are summed to obtain the index. Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction mole_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for hydrogen peroxide is H2O2. + + mol m-3 + + + "Mole concentration" means number of moles per unit volume, also called "molarity", and is used in the construction "mole_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula of hydrogen sulfide is H2S. + + mol m-3 - Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction mole_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for the hydroperoxyl radical is HO2. In chemistry, a 'radical' is a highly reactive, and therefore shortlived, species. + "Mole concentration" means number of moles per unit volume, also called "molarity", and is used in the construction "mole_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for the hydroperoxyl radical is HO2. In chemistry, a "radical" is a highly reactive, and therefore short lived, species. mol m-3 - Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction mole_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for the hydroxyl radical is OH. In chemistry, a 'radical' is a highly reactive, and therefore shortlived, species. + "Mole concentration" means number of moles per unit volume, also called "molarity", and is used in the construction "mole_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for the hydroxyl radical is OH. 
In chemistry, a "radical" is a highly reactive, and therefore short lived, species. @@ -10962,28 +12191,28 @@ totals are summed to obtain the index. mol m-3 - Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction mole_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. "Inorganic bromine", sometimes referred to as Bry, describes a family of chemical species which result from the degradation of source gases containing bromine (halons, methyl bromide, VSLS) and natural inorganic bromine sources such as volcanoes, seasalt and other aerosols. "Inorganic bromine" is the term used in standard names for all species belonging to the family that are represented within a given model. The list of individual species that are included in a quantity having a group chemical standard name can vary between models. Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute. Standard names that use the term "brox" are used for quantities that contain all inorganic bromine species except HBr and BrONO2. + "Mole concentration" means number of moles per unit volume, also called "molarity", and is used in the construction "mole_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". "Inorganic bromine", sometimes referred to as Bry, describes a family of chemical species which result from the degradation of source gases containing bromine (halons, methyl bromide, VSLS) and natural inorganic bromine sources such as volcanoes, sea salt and other aerosols. "Inorganic bromine" is the term used in standard names for all species belonging to the family that are represented within a given model. The list of individual species that are included in a quantity having a group chemical standard name can vary between models. Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute. Standard names that use the term "brox" are used for quantities that contain all inorganic bromine species except HBr and BrONO2. mol m-3 - Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction mole_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. "Inorganic chlorine", sometimes referred to as Cly, describes a family of chemical species which result from the degradation of source gases containing chlorine (CFCs, HCFCs, VSLS) and natural inorganic chlorine sources such as seasalt and other aerosols. "Inorganic chlorine" is the term used in standard names for all species belonging to the family that arerepresented within a given model. The list of individual species that are included in a quantity having a group chemical standard name can vary between models. Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute. 
Standard names that use the term "clox"are used for quantities that contain all inorganic chlorine species except HCl and ClONO2. + Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction "mole_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". "Inorganic chlorine", sometimes referred to as Cly, describes a family of chemical species which result from the degradation of source gases containing chlorine (CFCs, HCFCs, VSLS) and natural inorganic chlorine sources such as sea salt and other aerosols. "Inorganic chlorine" is the term used in standard names for all species belonging to the family that are represented within a given model. The list of individual species that are included in a quantity having a group chemical standard name can vary between models. Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute. Standard names that use the term "clox" are used for quantities that contain all inorganic chlorine species except HCl and ClONO2. mol m-3 - Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction mole_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for isoprene is CH2=C(CH3)CH=CH2. The IUPAC name for isoprene is 2-methyl-buta-1,3-diene. Isoprene is a member of the group of hydrocarbons known as terpenes. There are standard names for the terpene group as well as for some of the individual species. + "Mole concentration" means number of moles per unit volume, also called "molarity", and is used in the construction "mole_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for isoprene is CH2=C(CH3)CH=CH2. The IUPAC name for isoprene is 2-methylbuta-1,3-diene. Isoprene is a member of the group of hydrocarbons known as terpenes. There are standard names for the terpene group as well as for some of the individual species. mol m-3 - Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction mole_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for methyl hydroperoxide is CH3OOH. + "Mole concentration" means number of moles per unit volume, also called "molarity", and is used in the construction "mole_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for limonene is C10H16. The IUPAC name for limonene is 1-methyl-4-prop-1-en-2-ylcyclohexene. Limonene is a member of the group of hydrocarbons known as terpenes. There are standard names for the terpene group as well as for some of the individual species. @@ -11039,7 +12268,7 @@ totals are summed to obtain the index. 
mol m-3 - Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction mole_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for methyl_peroxy_radical is CH3O2. In chemistry, a 'radical' is a highly reactive, and therefore shortlived, species. + "Mole concentration" means number of moles per unit volume, also called "molarity", and is used in the construction "mole_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for methyl_peroxy_radical is CH3O2. In chemistry, a "radical" is a highly reactive, and therefore short lived, species. @@ -11060,14 +12289,14 @@ totals are summed to obtain the index. mol m-3 - Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction mole_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'.The phrase 'expressed_as' is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. 'Miscellaneous phytoplankton' are all those phytoplankton that are not diatoms, diazotrophs, calcareous phytoplankton, picophytoplankton or other seperately named components of the phytoplankton population. Phytoplankton are autotrophic prokaryotic or eukaryotic algae that live near the water surface where there is sufficient light to support photosynthesis. + Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction mole_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. "Miscellaneous phytoplankton" are all those phytoplankton that are not diatoms, diazotrophs, calcareous phytoplankton, picophytoplankton or other separately named components of the phytoplankton population. Phytoplankton are algae that grow where there is sufficient light to support photosynthesis. mol m-3 - Mole concentration' means number of moles per unit volume, also called "molarity", and is used in the construction mole_concentration_of_X_in_Y, where Xis a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The phrase 'expressed_as' is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. 
'Miscellaneous zooplankton' are all those zooplankton that are not mesozooplankton, microzooplankton or other seperately named components of the zooplankton population. + "Mole concentration" means number of moles per unit volume, also called "molarity", and is used in the construction "mole_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. "Miscellaneous zooplankton" are all those zooplankton that are not mesozooplankton, microzooplankton or other separately named components of the zooplankton population. @@ -11095,7 +12324,7 @@ totals are summed to obtain the index. mol m-3 - Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction mole_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. In chemistry, a 'radical' is a highly reactive, and therefore shortlived, species. + "Mole concentration" means number of moles per unit volume, also called "molarity", and is used in the construction "mole_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". In chemistry, a "radical" is a highly reactive, and therefore short-lived, species. @@ -11158,14 +12387,14 @@ totals are summed to obtain the index. mol m-3 - Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction mole_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. "Nox" means a combination of two radical species containing nitrogen and oxygen: NO+NO2. The phrase 'expressed_as' is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solelywith respect to the B contained in A, neglecting all other chemical constituents of A. + "Mole concentration" means number of moles per unit volume, also called "molarity", and is used in the construction "mole_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". "Nox" means a combination of two radical species containing nitrogen and oxygen: NO+NO2. The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. mol m-3 - Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction mole_concentration_of_X_in_Y, where X is a material constituent of Y.
A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. "Noy" describes a family of chemical species. The family usually includes atomic nitrogen (N), nitrogen monoxide (NO), nitrogen dioxide (NO2), dinitrogen pentoxide (N2O5), nitric acid (HNO3), peroxynitric acid (HNO4), bromine nitrate (BrONO2) , chlorine nitrate (ClONO2) and organic nitrates (most notably peroxyacetyl nitrate, sometimes referred to as PAN, (CH3COO2NO2)). The list of individual species that are included in a quantity having a group chemical standard name can vary between models. Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute. The phrase 'expressed_as' is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name iscalculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. + "Mole concentration" means number of moles per unit volume, also called "molarity", and is used in the construction "mole_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. "Noy" describes a family of chemical species. The family usually includes atomic nitrogen (N), nitrogen monoxide (NO), nitrogen dioxide (NO2), dinitrogen pentoxide (N2O5), nitric acid (HNO3), peroxynitric acid (HNO4), bromine nitrate (BrONO2), chlorine nitrate (ClONO2) and organic nitrates (most notably peroxyacetyl nitrate, sometimes referred to as PAN, (CH3COO2NO2)). The list of individual species that are included in a quantity having a group chemical standard name can vary between models. Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute. @@ -11196,56 +12425,133 @@ totals are summed to obtain the index. Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction mole_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for ozone is O3. - + mol m-3 - 'Mole concentration' means number of moles per unit volume, also called"molarity", and is used in the construction mole_concentration_of_X_in_Y, whereX is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The phrase 'expressed_as' is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A.
+ "Mole concentration" means number of moles per unit volume, also called "molarity", and is used in the construction "mole_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The phrase "expressed_as" is used in the construction "A_expressed_as_B", where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. Particulate means suspended solids of all sizes. - + mol m-3 - 'Mole concentration' means number of moles per unit volume, also called"molarity", and is used in the construction mole_concentration_of_X_in_Y, whereX is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The phrase 'expressed_as' is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. + "Mole concentration" means number of moles per unit volume, also called "molarity", and is used in the construction "mole_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The phrase "expressed_as" is used in the construction "A_expressed_as_B", where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. - + mol m-3 - 'Mole concentration' means number of moles per unit volume, also called"molarity", and is used in the construction mole_concentration_of_X_in_Y, whereX is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The phrase 'expressed_as' is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. + "Mole concentration" means number of moles per unit volume, also called "molarity", and is used in the construction "mole_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The phrase "expressed_as" is used in the construction "A_expressed_as_B", where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. Particulate means suspended solids of all sizes. - + mol m-3 - 'Mole concentration' means number of moles per unit volume, also called"molarity", and is used in the construction mole_concentration_of_X_in_Y, whereX is a material constituent of Y. 
A chemical or biological species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The phrase 'expressed_as' is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. + "Mole concentration" means number of moles per unit volume, also called "molarity", and is used in the construction "mole_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The phrase "expressed_as" is used in the construction "A_expressed_as_B", where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. Particulate means suspended solids of all sizes. - + mol m-3 - Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction "mole_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. + "Mole concentration" means number of moles per unit volume, also called "molarity", and is used in the construction "mole_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The phrase "expressed_as" is used in the construction "A_expressed_as_B", where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. Particulate means suspended solids of all sizes. - + mol m-3 - Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction mole_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for peroxyacetyl nitrate, sometimes referred to as PAN, is CH3COO2NO2. The IUPAC namefor peroxyacetyl_nitrate is nitroethaneperoxoate. + "Mole concentration" means number of moles per unit volume, also called "molarity", and is used in the construction "mole_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The phrase "expressed_as" is used in the construction "A_expressed_as_B", where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. 
Particulate means suspended solids of all sizes. Phosphorus means phosphorus in all chemical forms, commonly referred to as "total phosphorus". - + mol m-3 - Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction mole_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for peroxynitric acid, sometimes referred to as PNA, is HO2NO2. + "Mole concentration" means number of moles per unit volume, also called "molarity", and is used in the construction "mole_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The phrase "expressed_as" is used in the construction "A_expressed_as_B", where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. Particulate means suspended solids of all sizes. - + + mol m-3 + + + Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction mole_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. + + + + mol m-3 + + + "Mole concentration" means number of moles per unit volume, also called "molarity", and is used in the construction "mole_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The phrase "expressed_as" is used in the construction "A_expressed_as_B", where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. Particulate means suspended solids of all sizes. + + + + mol m-3 + + + "Mole concentration" means number of moles per unit volume, also called "molarity", and is used in the construction "mole_concentration_of_X_in_Y", where X is a material constituent of Y.
A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The phrase "expressed_as" is used in the construction "A_expressed_as_B", where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. Particulate means suspended solids of all sizes. + + + + mol m-3 + + + Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction mole_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. + + + + mol m-3 + + + Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction mole_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. + + + + mol m-3 + + + Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction mole_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. + + + + mol m-3 + + + Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction "mole_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. + + + + mol m-3 + + + Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction "mole_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". "Particulate organic nitrogen" means the sum of all organic nitrogen compounds that are solid, or bound to solid particles. 
"Organic nitrogen", when measured, always refers to all nitrogen incorporated in carbon compounds in the sample. Models may use the term to refer to nitrogen contained in specific groups of organic compounds in which case the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute. + + + + mol m-3 + + + "Mole concentration" means number of moles per unit volume, also called "molarity", and is used in the construction "mole_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for peroxyacetyl nitrate, sometimes referred to as PAN, is CH3COO2NO2. The IUPAC name for peroxyacetyl nitrate is nitroethaneperoxoate. + + + + mol m-3 + + + Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction mole_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for peroxynitric acid, sometimes referred to as PNA, is HO2NO2. + + + mol m-3 @@ -11256,42 +12562,77 @@ totals are summed to obtain the index. mol m-3 - Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction mole_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'.The phrase 'expressed_as' is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. Phytoplankton are autotrophic prokaryotic or eukaryotic algae that live near the water surface where there is sufficient light to support photosynthesis. Standard names also exist for the mole concentration of a number of components that make up the total phytoplankton population, such as diatoms, diazotrophs, calcareous phytoplankton, picophytoplankton and miscellaneous phytoplankton. + Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction mole_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. Phytoplankton are algae that grow where there is sufficient light to support photosynthesis. Standard names also exist for the mole concentration of a number of components that make up the total phytoplankton population, such as diatoms, diazotrophs, calcareous phytoplankton, picophytoplankton and miscellaneous phytoplankton. mol m-3 - 'Mole concentration' means number of moles per unit volume, also called"molarity", and is used in the construction mole_concentration_of_X_in_Y, whereX is a material constituent of Y. 
A chemical or biological species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The phrase 'expressed_as' is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. Phytoplankton are autotrophic prokaryotic or eukaryotic algae that live near the water surface where there is sufficient light to support photosynthesis. + Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction mole_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. Phytoplankton are algae that grow where there is sufficient light to support photosynthesis. mol m-3 - Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction mole_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The phrase 'expressed_as' is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. + Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction mole_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. Phytoplankton are algae that grow where there is sufficient light to support photosynthesis. mol m-3 - 'Mole concentration' means number of moles per unit volume, also called"molarity", and is used in the construction mole_concentration_of_X_in_Y, whereX is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The phrase 'expressed_as' is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. Phytoplankton are autotrophic prokaryotic or eukaryotic algae that live near the water surface where there is sufficient light to support photosynthesis. 
+ Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction mole_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. Phytoplankton are algae that grow where there is sufficient light to support photosynthesis. mol m-3 - 'Mole concentration' means number of moles per unit volume, also called"molarity", and is used in the construction mole_concentration_of_X_in_Y, whereX is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The phrase 'expressed_as' is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. Phytoplankton are autotrophic prokaryotic or eukaryotic algae that live near the water surface where there is sufficient light to support photosynthesis. + Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction mole_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. Phytoplankton are algae that grow where there is sufficient light to support photosynthesis. mol m-3 - Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction mole_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'.The phrase 'expressed_as' is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. Picophytoplankton are phytoplankton of less than 2 micrometers in size. Phytoplankton are autotrophic prokaryotic or eukaryotic algae that live near the water surface where there is sufficient light to support photosynthesis. + Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction mole_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. 
It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. Picophytoplankton are phytoplankton of less than 2 micrometers in size. Phytoplankton are algae that grow where there is sufficient light to support photosynthesis. + + + + mol m-3 + + + "Mole concentration" means the number of moles per unit volume, also called "molarity", and is used in the construction "mole_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". "Dissolved inorganic carbon-13" is the sum of CO3_13C, HCO3_13C and H2CO3_13C. The subduction and subsequent transport of surface water carry into the interior ocean considerable quantities of dissolved inorganic carbon-13, which is entirely independent of biological activity (such as organic decomposition and oxidation) after the water leaves the sea surface. Such dissolved inorganic carbon-13 is termed "preformed" dissolved inorganic carbon-13 (Redfield, 1942). + + + + mol m-3 + + + "Mole concentration" means the number of moles per unit volume, also called "molarity", and is used in the construction "mole_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". "Dissolved inorganic carbon" is the sum of CO3, HCO3 and H2CO3. The subduction and subsequent transport of surface water carry into the interior ocean considerable quantities of dissolved inorganic carbon, which is entirely independent of biological activity (such as organic decomposition and oxidation) after the water leaves the sea surface. Such dissolved inorganic carbon is termed "preformed" dissolved inorganic carbon (Redfield, 1942). + + + + mol m-3 + + + "Mole concentration" means the number of moles per unit volume, also called "molarity", and is used in the construction "mole_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". "Dissolved inorganic phosphorus" means the sum of all inorganic phosphorus in solution (including phosphate, hydrogen phosphate, dihydrogen phosphate, and phosphoric acid). The subduction and subsequent transport of surface water carry into the interior ocean considerable quantities of nutrients, which are entirely independent of biological activity (such as organic decomposition and oxidation) after the water leaves the sea surface. Such nutrients are termed "preformed" nutrients (Redfield, 1942). + + + + mol m-3 + + + "Mole concentration" means the number of moles per unit volume, also called "molarity", and is used in the construction "mole_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The subduction and subsequent transport of surface water carry into the interior ocean considerable quantities of dissolved oxygen, which are entirely independent of biological activity (such as organic decomposition and oxidation) after the water leaves the sea surface. Such dissolved oxygen is termed "preformed" dissolved oxygen (Redfield, 1942).
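The "mole_concentration_of_X_in_Y" and "A_expressed_as_B" constructions that these entries describe are purely lexical: a standard name is assembled by substituting species and medium terms into a fixed pattern, and the result is only meaningful if the assembled name actually appears in the standard name table. A minimal Python sketch of that composition (the helper functions below are illustrative only, not part of Iris or the CF conventions):

def mole_concentration(species, medium):
    # Compose a name of the form mole_concentration_of_X_in_Y
    # (canonical units: mol m-3).
    return f"mole_concentration_of_{species}_in_{medium}"

def expressed_as(species, constituent):
    # Compose a species phrase of the form A_expressed_as_B: the quantity
    # is calculated solely with respect to the B contained in A.
    return f"{species}_expressed_as_{constituent}"

# X may be a single term such as "ozone" ...
print(mole_concentration("ozone", "air"))
# ... or a phrase such as "nox_expressed_as_nitrogen".
print(mole_concentration(expressed_as("nox", "nitrogen"), "air"))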
+ + + + mol m-3 + + + "Mole concentration" means number of moles per unit volume, also called "molarity", and is used in the construction "mole_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The phrase "expressed_as" is used in the construction "A_expressed_as_B", where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. "Prokaryotes" means all Bacteria and Archaea excluding photosynthetic cyanobacteria such as Synechococcus and Prochlorococcus or other separately named components of the prokaryotic population. @@ -11340,7 +12681,7 @@ totals are summed to obtain the index. mol m-3 - Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction mole_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula for toluene is C6H5CH3. Toluene has the same structure as benzene, except that one ofthe hydrogen atoms is replaced by a methyl group. The systematic name for toluene is methylbenzene. + "Mole concentration" means number of moles per unit volume, also called "molarity", and is used in the construction "mole_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for toluene is C6H5CH3. Toluene has the same structure as benzene, except that one of the hydrogen atoms is replaced by a methyl group. The IUPAC name for toluene is methylbenzene. @@ -11361,7 +12702,7 @@ totals are summed to obtain the index. mol m-3 - Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction mole_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'.The phrase 'expressed_as' is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. Standard names also exist for the mole concentration of a number of components that make up the total zooplankton population, such as mesozooplankton, microzooplankton andmiscellaneous zooplankton. + Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction "mole_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. 
Standard names also exist for the mole concentration of a number of components that make up the total zooplankton population, such as mesozooplankton, microzooplankton and miscellaneous zooplankton. @@ -11410,7 +12751,7 @@ totals are summed to obtain the index. 1 - Mole fraction is used in the construction mole_fraction_of_X_in_Y, whereX is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemicalformula for acetic_acid is CH3COOH. The IUPAC name for acetic acid is ethanoic acid. + "Mole fraction" is used in the construction "mole_fraction_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for acetic acid is CH3COOH. The IUPAC name for acetic acid is ethanoic acid. @@ -11424,14 +12765,14 @@ totals are summed to obtain the index. 1 - Mole fraction is used in the construction mole_fraction_of_X_in_Y, whereX is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemicalformula for aceto-nitrile is CH3CN. The IUPAC name for aceto-nitrile is ethanenitrile. + "Mole fraction" is used in the construction mole_fraction_of_X_in_Y, where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for acetonitrile is CH3CN. The IUPAC name for acetonitrile is ethanenitrile. 1 - Mole fraction is used in the construction mole_fraction_of_X_in_Y, whereX is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. Aldehydes are organic compounds with a CHO group; "aldehydes" is the term used in standard names to describe the group of chemical species having this common structure that are represented within a given model. The list of individual species that are included in a quantity having a group chemical standard name can vary between models. Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute. Standard names exist for formaldehyde as the simplest member of the aldehydes group. + "Mole fraction" is used in the construction "mole_fraction_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". Aldehydes are organic compounds with a CHO group; "aldehydes" is the term used in standard names to describe the group of chemical species having this common structure that are represented within a given model. The list of individual species that are included in a quantity having a group chemical standard name can vary between models. Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute. Standard names exist for formaldehyde as the simplest member of the aldehydes group. @@ -11459,14 +12800,14 @@ totals are summed to obtain the index. 
1 - Mole fraction is used in the construction mole_fraction_of_X_in_Y, whereX is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemicalformula for alpha_pinene is C10H16. The IUPAC name for alpha-pinene is (1S,5S)-2,6,6-trimethylbicyclo[3.1.1]hept-2-ene. + "Mole fraction" is used in the construction "mole_fraction_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for alpha-pinene is C10H16. The IUPAC name for alpha-pinene is (1S,5S)-2,6,6-trimethylbicyclo[3.1.1]hept-2-ene. 1 - Mole fraction is used in the construction mole_fraction_of_X_in_Y, whereX is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemicalformula for ammonia is NH3. + "Mole fraction" is used in the construction "mole_fraction_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for ammonia is NH3. @@ -11515,7 +12856,7 @@ totals are summed to obtain the index. 1 - Mole fraction is used in the construction mole_fraction_of_X_in_Y, whereX is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemicalformula for beta_pinene is C10H16. The IUPAC name for beta-pinene is (1S,5S)-6,6-dimethyl-2-methylenebicyclo[3.1.1]heptane. + "Mole fraction" is used in the construction "mole_fraction_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for beta-pinene is C10H16. The IUPAC name for beta-pinene is (1S,5S)-6,6-dimethyl-2-methylenebicyclo[3.1.1]heptane. @@ -11546,18 +12887,32 @@ totals are summed to obtain the index. Mole fraction is used in the construction mole_fraction_of_X_in_Y, where X is a material constituent of Y. The chemical formula of bromine nitrate is BrONO2. + + mol mol-1 + + + "Mole fraction" is used in the construction "mole_fraction_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for bromochloromethane is CH2BrCl. The IUPAC name is bromochloromethane. + + + + mol mol-1 + + + "Mole fraction" is used in the construction "mole_fraction_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for bromodichloromethane is CHBrCl2. The IUPAC name is bromodichloromethane. + + 1 - Mole fraction is used in the construction mole_fraction_of_X_in_Y, whereX is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'.
"Brox" describes a family of chemical species consisting of inorganic bromine compounds withthe exception of hydrogen bromide (HBr) and bromine nitrate (BrONO2). "Brox" isthe term used in standard names for all species belonging to the family that are represented within a given model. The list of individual species that are included in a quantity with a group chemical standard name can vary between models. Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute. "Inorganic bromine", sometimes referred to as Bry, describes a family of chemical species which result from the degradation of source gases containing bromine (halons, methyl bromide, VSLS) and natural inorganic bromine sources such as volcanoes, seasalt and other aerosols. Standard names that use the term "inorganic_bromine" are used for quantities that contain all inorganic bromine species includingHCl and ClONO2. The phrase 'expressed_as' is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. + "Mole fraction" is used in the construction "mole_fraction_of_X_in_Y", where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. "Brox" describes a family of chemical species consisting of inorganic bromine compounds with the exception of hydrogen bromide (HBr) and bromine nitrate (BrONO2). "Brox" is the term used in standard names for all species belonging to the family that are represented within a given model. The list of individual species that are included in a quantity with a group chemical standard name can vary between models. Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute. "Inorganic bromine", sometimes referred to as Bry, describes a family of chemical species which result from the degradation of source gases containing bromine (halons, methyl bromide, VSLS) and natural inorganic bromine sources such as volcanoes, sea salt and other aerosols. Standard names that use the term "inorganic_bromine" are used for quantities that contain all inorganic bromine species including HCl and ClONO2. 1 - Mole fraction is used in the construction mole_fraction_of_X_in_Y, whereX is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemicalformula for butane is C4H10. Butane is a member of the group of hydrocarbons known as alkanes. There are standard names for the alkane group as well as for some of the individual species. + "Mole fraction" is used in the construction "mole_fraction_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for butane is C4H10. 
Butane is a member of the group of hydrocarbons known as alkanes. There are standard names for the alkane group as well as for some of the individual species. @@ -11567,6 +12922,13 @@ totals are summed to obtain the index. Mole fraction is used in the construction mole_fraction_of_X_in_Y, where X is a material constituent of Y. + + 1 + + + "Mole fraction" is used in the construction "mole_fraction_of_X_in_Y", where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The phrase "in_dry_air" means that the quantity is calculated as the total number of particles of X divided by the number of dry air particles, i.e. the effect of water vapor is excluded. The chemical formula for carbon dioxide is CO2. + + 1 @@ -11574,18 +12936,25 @@ totals are summed to obtain the index. Mole fraction is used in the construction mole_fraction_of_X_in_Y, where X is a material constituent of Y. + + 1 + + + "Mole fraction" is used in the construction "mole_fraction_of_X_in_Y", where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The phrase "in_dry_air" means that the quantity is calculated as the total number of particles of X divided by the number of dry air particles, i.e. the effect of water vapor is excluded. The chemical formula of carbon monoxide is CO. + + 1 - Mole fraction is used in the construction mole_fraction_of_X_in_Y, where X is a material constituent of Y. The chemical formula of carbon tetrachloride is CCl4. + "Mole fraction" is used in the construction "mole_fraction_of_X_in_Y", where X is a material constituent of Y. The chemical formula of carbon tetrachloride is CCl4. The IUPAC name for carbon tetrachloride is tetrachloromethane. 1 - Mole fraction is used in the construction "mole_fraction_of_X_in_Y", where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for carbon tetrafluoride is CF4. The IUPAC name for carbon tetrafluoride is tetrafluoromethane. + "Mole fraction" is used in the construction "mole_fraction_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for carbon tetrafluoride, also called PFC14, is CF4. The IUPAC name for carbon tetrafluoride is tetrafluoromethane. @@ -11606,42 +12975,49 @@ totals are summed to obtain the index. 1 - Mole fraction is used in the construction mole_fraction_of_X_in_Y, where X is a material constituent of Y. The chemical formula of CFC113a CCl3CF3. The IUPAC name for CFC113a is 1,1,1-trichloro-2,2,2-trifluoro-ethane. + "Mole fraction" is used in the construction "mole_fraction_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula of CFC113a is CCl3CF3. The IUPAC name for CFC113a is 1,1,1-trichloro-2,2,2-trifluoroethane. 1 - Mole fraction is used in the construction mole_fraction_of_X_in_Y, where X is a material constituent of Y. The chemical formula of CFC113 is CCl2FCClF2. 
The IUPAC name for CFC113 is 1,1,2-trichloro-1,2,2-trifluoro-ethane. + "Mole fraction" is used in the construction "mole_fraction_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula of CFC113 is CCl2FCClF2. The IUPAC name for CFC113 is 1,1,2-trichloro-1,2,2-trifluoroethane. 1 - Mole fraction is used in the construction mole_fraction_of_X_in_Y, where X is a material constituent of Y. The chemical formula of CFC114 is CClF2CClF2. The IUPAC name for CFC114 is 1,2-dichloro-1,1,2,2-tetrafluoro-ethane. + "Mole fraction" is used in the construction "mole_fraction_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula of CFC114 is CClF2CClF2. The IUPAC name for CFC114 is 1,2-dichloro-1,1,2,2-tetrafluoroethane. 1 - Mole fraction is used in the construction mole_fraction_of_X_in_Y, where X is a material constituent of Y. The chemical formula of CFC115 is CClF2CF3. The IUPAC name for CFC115 is 1-chloro-1,1,2,2,2-pentafluoro-ethane. + "Mole fraction" is used in the construction "mole_fraction_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula of CFC115 is CClF2CF3. The IUPAC name for CFC115 is 1-chloro-1,1,2,2,2-pentafluoroethane. 1 - Mole fraction is used in the construction mole_fraction_of_X_in_Y, where X is a material constituent of Y. The chemical formula of CFC11 is CFCl3. The IUPAC name for CFC11 is trichloro-fluoro-methane. + "Mole fraction" is used in the construction "mole_fraction_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula of CFC11 is CFCl3. The IUPAC name for CFC11 is trichloro(fluoro)methane. 1 - Mole fraction is used in the construction mole_fraction_of_X_in_Y, where X is a material constituent of Y. The chemical formula of CFC12 is CF2Cl2. The IUPAC name for CFC12 is dichloro-difluoro-methane. + "Mole fraction" is used in the construction "mole_fraction_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for CFC12 is CF2Cl2. The IUPAC name for CFC12 is dichloro(difluoro)methane. + + + + 1 + + + "Mole fraction" is used in the construction "mole_fraction_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for CFC13 is CF3Cl. The IUPAC name for CFC13 is chloro(trifluoro)methane. @@ -11665,25 +13041,53 @@ totals are summed to obtain the index. Mole fraction is used in the construction mole_fraction_of_X_in_Y, where X is a material constituent of Y. The chemical formula of chlorine nitrate is ClONO2. + + 1 + + + "Mole fraction" is used in the construction "mole_fraction_of_X_in_Y", where X is a material constituent of Y. 
A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for chloroform is CHCl3. The IUPAC name for chloroform is trichloromethane. + + 1 - Mole fraction is used in the construction mole_fraction_of_X_in_Y, whereX is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. "Clox" describes a family of chemical species consisting of inorganic chlorine compounds withthe exception of hydrogen chloride (HCl) and chlorine nitrate (ClONO2). "Clox" is the term used in standard names for all species belonging to the family that are represented within a given model. The list of individual species that are included in a quantity with a group chemical standard name can vary between models. Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute. "Inorganic chlorine", sometimes referred to as Cly, describes a family of chemical species which result from the degradation of source gasescontaining chlorine (CFCs, HCFCs, VSLS) and natural inorganic chlorine sources such as seasalt and other aerosols. Standard names that use the term "inorganic_chlorine" are used for quantities that contain all inorganic chlorine species including HCl and ClONO2. The phrase 'expressed_as' is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. + "Mole fraction" is used in the construction "mole_fraction_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. "Clox" describes a family of chemical species consisting of inorganic chlorine compounds with the exception of hydrogen chloride (HCl) and chlorine nitrate (ClONO2). "Clox" is the term used in standard names for all species belonging to the family that are represented within a given model. The list of individual species that are included in a quantity with a group chemical standard name can vary between models. Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute. "Inorganic chlorine", sometimes referred to as Cly, describes a family of chemical species which result from the degradation of source gases containing chlorine (CFCs, HCFCs, VSLS) and natural inorganic chlorine sources such as sea salt and other aerosols. Standard names that use the term "inorganic_chlorine" are used for quantities that contain all inorganic chlorine species including HCl and ClONO2. + + + + mol mol-1 + + + "Mole fraction" is used in the construction "mole_fraction_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". 
The chemical formula for dibromochloromethane is CHBr2Cl. The IUPAC name is dibromochloromethane. + + + + mol mol-1 + + + "Mole fraction" is used in the construction "mole_fraction_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for dibromomethane is CH2Br2. The IUPAC name is dibromomethane. 1 - Mole fraction is used in the construction mole_fraction_of_X_in_Y, whereX is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. Dichlorine is the molecular form of elemental chlorine with the chemical formula Cl2. + "Mole fraction" is used in the construction "mole_fraction_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". Dichlorine is the molecular form of elemental chlorine with the chemical formula Cl2. 1 - Mole fraction is used in the construction mole_fraction_of_X_in_Y, where X is a material constituent of Y. The chemical formula of dichlorine peroxide is Cl2O2. + "Mole fraction" is used in the construction "mole_fraction_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula of dichlorine peroxide is Cl2O2. + + + + 1 + + + "Mole fraction" is used in the construction "mole_fraction_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for dichloromethane is CH2Cl2. The IUPAC name is dichloromethane. @@ -11711,7 +13115,7 @@ totals are summed to obtain the index. 1 - Mole fraction is used in the construction mole_fraction_of_X_in_Y, whereX is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemicalformula for ethanol is C2H5OH. + "Mole fraction" is used in the construction "mole_fraction_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for ethanol is C2H5OH. @@ -11739,7 +13143,7 @@ totals are summed to obtain the index. 1 - Mole fraction is used in the construction mole_fraction_of_X_in_Y, whereX is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemicalformula for formic acid is HCOOH. The IUPAC name for formic acid is methanoic acid. + "Mole fraction" is used in the construction "mole_fraction_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for formic acid is HCOOH. The IUPAC name for formic acid is methanoic acid. @@ -11767,56 +13171,77 @@ totals are summed to obtain the index. 
1 - Mole fraction is used in the construction mole_fraction_of_X_in_Y, where X is a material constituent of Y. The chemical formula of halon1202 is CBr2F2. The IUPAC name for halon 1202 is dibromo-difluoro-methane. + "Mole fraction" is used in the construction "mole_fraction_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for Halon1202 is CBr2F2. The IUPAC name for Halon1202 is dibromo(difluoro)methane. 1 - Mole fraction is used in the construction mole_fraction_of_X_in_Y, where X is a material constituent of Y. The chemical formula of halon1211 is CBrClF2. The IUPAC name for halon 1211 is bromo-chloro-difluoro-methane. + "Mole fraction" is used in the construction "mole_fraction_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for Halon1211 is CBrClF2. The IUPAC name for Halon1211 is bromo-chloro-difluoromethane. 1 - Mole fraction is used in the construction mole_fraction_of_X_in_Y, where X is a material constituent of Y. The chemical formula of halon1301 is CBrF3. The IUPAC name for halon 1301 is bromo-trifluoro-methane. + "Mole fraction" is used in the construction "mole_fraction_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for Halon1301 is CBrF3. The IUPAC name for Halon1301 is bromo(trifluoro)methane. 1 - Mole fraction is used in the construction mole_fraction_of_X_in_Y, where X is a material constituent of Y. The chemical formula of halon2402 is C2Br2F4. The IUPAC name for halon 2402 is 1,2-dibromo-1,1,2,2-tetrafluoro-ethane. + "Mole fraction" is used in the construction "mole_fraction_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for Halon2402 is C2Br2F4. The IUPAC name for Halon2402 is 1,2-dibromo-1,1,2,2-tetrafluoroethane. 1 - Mole fraction is used in the construction mole_fraction_of_X_in_Y, whereX is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemicalformula for hcc140a is CH3CCl3. The IUPAC name for hcc140a is 1,1,1-trichloro-ethane. + "Mole fraction" is used in the construction "mole_fraction_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for HCC140a, also called methyl chloroform, is CH3CCl3. The IUPAC name for HCC140a is 1,1,1-trichloroethane. + + + + 1 + + + "Mole fraction" is used in the construction "mole_fraction_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for hcfc124 is C2HClF4. The IUPAC name for hcfc124 is 1-chloro-1,2,2,2-tetrafluoroethane. 
+ + + + 1 + + + "Mole fraction" is used in the construction "mole_fraction_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for HCFC132b is CH2ClCClF2. The IUPAC name for HCFC132b is 1,2-dichloro-1,1-difluoroethane. + + + + 1 + + + "Mole fraction" is used in the construction "mole_fraction_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for HCFC133a is CH2ClCF3. The IUPAC name for HCFC133a is 2-chloro-1,1,1-trifluoroethane. 1 - Mole fraction is used in the construction mole_fraction_of_X_in_Y, whereX is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemicalformula for HCFC141b is CH3CCl2F. The IUPAC name for HCFC141b is 1,1-dichloro-1-fluoroethane. + "Mole fraction" is used in the construction "mole_fraction_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for HCFC141b is CH3CCl2F. The IUPAC name for HCFC141b is 1,1-dichloro-1-fluoroethane. 1 - Mole fraction is used in the construction mole_fraction_of_X_in_Y, whereX is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemicalformula for HCFC142b is CH3CClF2. The IUPAC name for HCFC142b is 1-chloro-1,1-difluoroethane. + "Mole fraction" is used in the construction "mole_fraction_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for HCFC142b is CH3CClF2. The IUPAC name for HCFC142b is 1-chloro-1,1-difluoroethane. 1 - Mole fraction is used in the construction mole_fraction_of_X_in_Y, whereX is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemicalformula for HCFC22 is CHClF2. The IUPAC name for HCFC22 is chloro-difluoro-methane. + "Mole fraction" is used in the construction "mole_fraction_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for HCFC22 is CHClF2. The IUPAC name for HCFC22 is chloro(difluoro)methane. @@ -11826,11 +13251,88 @@ totals are summed to obtain the index. Mole fraction is used in the construction mole_fraction_of_X_in_Y, where X is a material constituent of Y. + + 1 + + + "Mole fraction" is used in the construction "mole_fraction_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for hfc125 is CF3CF2H. The IUPAC name for hfc125 is 1,1,1,2,2-pentafluoroethane. 
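All of the entries above share the "mole_fraction_of_X_in_Y" construction, and the point of standardising them is programmatic selection: a data variable advertises one of these names in its standard_name attribute and tools constrain on it. A minimal sketch of that usage with Iris, assuming a hypothetical file tracers.nc containing a variable that carries one of the names added here:

    import iris

    # Select a variable by its CF standard name; the string follows the
    # "mole_fraction_of_X_in_Y" construction defined in the table above.
    # "tracers.nc" is a hypothetical input file.
    cube = iris.load_cube("tracers.nc", "mole_fraction_of_hcfc22_in_air")
    print(cube.units)  # the table gives canonical units of "1" (dimensionless)

Passing the name string to iris.load_cube applies it as a name constraint, so unrelated variables in the same file are ignored.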
+ + + + 1 + + + "Mole fraction" is used in the construction "mole_fraction_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for hfc134a is CF3CFH2. The IUPAC name for hfc134a is 1,1,1,2-tetrafluoroethane. + + + + 1 + + + "Mole fraction" is used in the construction "mole_fraction_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for hfc143a is CF3CH3. The IUPAC name for hfc143a is 1,1,1-trifluoroethane. + + + + 1 + + + "Mole fraction" is used in the construction "mole_fraction_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for hfc152a is CF2HCH3. The IUPAC name for hfc152a is 1,1-difluoroethane. + + + + 1 + + + "Mole fraction" is used in the construction "mole_fraction_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for hfc227ea is C3HF7. The IUPAC name for hfc227ea is 1,1,1,2,3,3,3-heptafluoropropane. + + + + 1 + + + "Mole fraction" is used in the construction "mole_fraction_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for hfc236fa is C3H2F6. The IUPAC name for hfc236fa is 1,1,1,3,3,3-hexafluoropropane. + + + + 1 + + + "Mole fraction" is used in the construction "mole_fraction_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for hfc23 is CF3H. The IUPAC name for hfc23 is trifluoromethane. + + + + 1 + + + "Mole fraction" is used in the construction "mole_fraction_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for hfc245fa is C3H3F5. The IUPAC name for hfc245fa is 1,1,1,3,3-pentafluoropropane. + + + + 1 + + + "Mole fraction" is used in the construction "mole_fraction_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for hfc32 is CF2H2. The IUPAC name for hfc32 is difluoromethane. + + + + 1 + + + "Mole fraction" is used in the construction "mole_fraction_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for hfc365mfc is C4H5F5. The IUPAC name for hfc365mfc is 1,1,1,3,3-pentafluorobutane. + + + + 1 + + + "Mole fraction" is used in the construction "mole_fraction_of_X_in_Y", where X is a material constituent of Y. 
A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for hfc4310mee is C5H2F10. The IUPAC name for hfc4310mee is 1,1,1,2,2,3,4,5,5,5-decafluoropentane. + + 1 - Mole fraction is used in the construction mole_fraction_of_X_in_Y, whereX is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. "HOx" means a combination of two radical species containing hydrogen and oxygen: OH and HO2. The phrase 'expressed_as' is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. + "Mole fraction" is used in the construction "mole_fraction_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. "HOx" means a combination of two radical species containing hydrogen and oxygen: OH and HO2. @@ -11900,7 +13402,7 @@ totals are summed to obtain the index. 1 - Mole fraction is used in the construction mole_fraction_of_X_in_Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. "Inorganic bromine", sometimes referred to as Bry, describes a family of chemical species which result from the degradation of bromine containing source gases (halons, methyl bromide, VSLS) and natural inorganic bromine sources such as volcanoes, seasalt and other aerosols. "Inorganic bromine" is the term used in standard names for all species belonging to the family that are represented within a given model. The list of individual species that are included in a quantity having a group chemical standard name can vary between models. Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute. Standard names that use the term "brox" are used for quantities that contain all inorganic bromine species except HBr and BrONO2. + "Mole fraction" is used in the construction "mole_fraction_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". "Inorganic bromine", sometimes referred to as Bry, describes a family of chemical species which result from the degradation of bromine containing source gases (halons, methyl bromide, VSLS) and natural inorganic bromine sources such as volcanoes, sea salt and other aerosols. "Inorganic bromine" is the term used in standard names for all species belonging to the family that are represented within a given model. The list of individual species that are included in a quantity having a group chemical standard name can vary between models. 
Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute. Standard names that use the term "brox" are used for quantities that contain all inorganic bromine species except HBr and BrONO2. @@ -11914,14 +13416,14 @@ totals are summed to obtain the index. 1 - Mole fraction is used in the construction mole_fraction_of_X_in_Y, where X is a material constituent of Y. + "Mole fraction" is used in the construction "mole_fraction_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for isoprene is CH2=C(CH3)CH=CH2. The IUPAC name for isoprene is 2-methylbuta-1,3-diene. Isoprene is a member of the group of hydrocarbons known as terpenes. There are standard names for the terpene group as well as for some of the individual species. 1 - Mole fraction is used in the construction mole_fraction_of_X_in_Y, whereX is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemicalformula for limonene is C10H16. The IUPAC name for limonene is 1-methyl-4-prop-1-en-2-yl-cyclohexene. Limonene is a member of the group of hydrocarbons known as terpenes. There arestandard names for the terpene group as well as for some of the individual species. + "Mole fraction" is used in the construction "mole_fraction_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for limonene is C10H16. The IUPAC name for limonene is 1-methyl-4-prop-1-en-2-ylcyclohexene. Limonene is a member of the group of hydrocarbons known as terpenes. There are standard names for the terpene group as well as for some of the individual species. @@ -11931,18 +13433,18 @@ totals are summed to obtain the index. Mole fraction is used in the construction mole_fraction_of_X_in_Y, where X is a material constituent of Y. - + 1 - Mole fraction is used in the construction mole_fraction_of_X_in_Y, whereX is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemicalformula for methanol is CH3OH. + "Mole fraction" is used in the construction "mole_fraction_of_X_in_Y", where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The phrase "in_dry_air" means that the quantity is calculated as the number of particles of X divided by the number of dry air particles, i.e. the effect of water vapor is excluded. The chemical formula for methane is CH4. Methane is a member of the group of hydrocarbons known as alkanes. There are standard names for the alkane group as well as for some of the individual species. - + 1 - Mole fraction is used in the construction mole_fraction_of_X_in_Y, whereX is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. Methylglyoxal is an organic molecule with the chemical formula CH3COCHO. It is also called pyruvaldehyde or 2-oxopropanal. 
+ "Mole fraction" is used in the construction "mole_fraction_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for methanol is CH3OH. @@ -11959,6 +13461,13 @@ totals are summed to obtain the index. Mole fraction is used in the construction mole_fraction_of_X_in_Y, where X is a material constituent of Y. The chemical formula of methyl chloride is CH3Cl. + + 1 + + + "Mole fraction" is used in the construction "mole_fraction_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". Methylglyoxal is an organic molecule with the chemical formula CH3COCHO. It is also called pyruvaldehyde or 2-oxopropanal. + + 1 @@ -11970,7 +13479,7 @@ totals are summed to obtain the index. 1 - Mole fraction is used in the construction mole_fraction_of_X_in_Y, whereX is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemicalformula for methyl_peroxy_radical is CH3O2. In chemistry, a 'radical' is a highly reactive, and therefore shortlived, species. + "Mole fraction" is used in the construction "mole_fraction_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for methyl peroxy radical is CH3O2. In chemistry, a "radical" is a highly reactive, and therefore short lived, species. @@ -11984,7 +13493,7 @@ totals are summed to obtain the index. 1 - Mole fraction is used in the construction mole_fraction_of_X_in_Y, whereX is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. In chemistry, a 'radical' is a highly reactive, and therefore shortlived, species. + "Mole fraction" is used in the construction "mole_fraction_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". In chemistry, a "radical" is a highly reactive, and therefore short lived, species. @@ -12015,11 +13524,18 @@ totals are summed to obtain the index. Mole fraction is used in the construction mole_fraction_of_X_in_Y, where X is a material constituent of Y. + + 1 + + + "Mole fraction" is used in the construction "mole_fraction_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for nitrogen trifluoride is NF3. Nitrogen trifluoride is the IUPAC name. + + 1 - Mole fraction is used in the construction mole_fraction_of_X_in_Y, whereX is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemicalformula for nitrous acid is HNO2. + "Mole fraction" is used in the construction "mole_fraction_of_X_in_Y", where X is a material constituent of Y. 
A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for nitrous acid is HNO2. @@ -12029,6 +13545,13 @@ totals are summed to obtain the index. Mole fraction is used in the construction mole_fraction_of_X_in_Y, where X is a material constituent of Y. The chemical formula of nitrous oxide is N2O. + + 1 + + + "Mole fraction" is used in the construction "mole_fraction_of_X_in_Y", where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The phrase "in_dry_air" means that the quantity is calculated as the number of particles of X divided by the number of dry air particles, i.e. the effect of water vapor is excluded. The chemical formula for nitrous oxide is N2O. + + 1 @@ -12040,14 +13563,14 @@ totals are summed to obtain the index. 1 - Mole fraction is used in the construction mole_fraction_of_X_in_Y, whereX is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. "Nox" means a combination of two radical species containing nitrogen and oxygen: NO+NO2. The phrase 'expressed_as' is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. + "Mole fraction" is used in the construction "mole_fraction_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. "Nox" means a combination of two radical species containing nitrogen and oxygen: NO+NO2. 1 - Mole fraction is used in the construction mole_fraction_of_X_in_Y, whereX is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. "Noy" describes a family of chemical species. The family usually includes atomic nitrogen (N), nitrogen monoxide (NO), nitrogen dioxide (NO2), dinitrogen pentoxide (N2O5), nitric acid (HNO3), peroxynitric acid (HNO4), bromine nitrate (BrONO2) , chlorine nitrate (ClONO2) and organic nitrates (most notably peroxyacetyl nitrate, sometimes referred to as PAN, (CH3COO2NO2)). The list of individual species that are included in a quantity having a group chemical standard name can vary between models. Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute. The phrase 'expressed_as' is used in the construction A_expressed_as_B, whereB is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. + "Mole fraction" is used in the construction "mole_fraction_of_X_in_Y", where X is a material constituent of Y. 
A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. "Noy" describes a family of chemical species. The family usually includes atomic nitrogen (N), nitrogen monoxide (NO), nitrogen dioxide (NO2), dinitrogen pentoxide (N2O5), nitric acid (HNO3), peroxynitric acid (HNO4), bromine nitrate (BrONO2), chlorine nitrate (ClONO2) and organic nitrates (most notably peroxyacetyl nitrate, sometimes referred to as PAN (CH3COO2NO2)). The list of individual species that are included in a quantity having a group chemical standard name can vary between models. Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute. @@ -12071,6 +13594,13 @@ totals are summed to obtain the index. Mole fraction is used in the construction mole_fraction_of_X_in_Y, where X is a material constituent of Y. + + 1 + + + "Mole fraction" is used in the construction "mole_fraction_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for perchloroethene is CCl2CCl2. The IUPAC name for perchloroethene is tetrachloroethene. + + 1 @@ -12085,6 +13615,27 @@ totals are summed to obtain the index. Mole fraction is used in the construction mole_fraction_of_X_in_Y, where X is a material constituent of Y. The chemical formula of peroxynitric acid is HNO4. + + 1 + + + "Mole fraction" is used in the construction "mole_fraction_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for pfc116 is C2F6. The IUPAC name for pfc116 is hexafluoroethane. + + + + 1 + + + "Mole fraction" is used in the construction "mole_fraction_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for pfc218 is C3F8. The IUPAC name for pfc218 is octafluoropropane. + + + + 1 + + + "Mole fraction" is used in the construction "mole_fraction_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for pfc318 is c-C4F8. The IUPAC name for pfc318 is octafluorocyclobutane. + + 1 @@ -12103,7 +13654,7 @@ totals are summed to obtain the index. 1 - Mole fraction is used in the construction mole_fraction_of_X_in_Y, whereX is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemicalsymbol for radon is Rn. + "Mole fraction" is used in the construction "mole_fraction_of_X_in_Y", where X is a material constituent of Y.
A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical symbol for radon is Rn. @@ -12120,11 +13671,25 @@ totals are summed to obtain the index. Mole fraction is used in the construction "mole_fraction_of_X_in_Y", where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula of sulfur hexafluoride is SF6. + + 1 + + + "Mole fraction" is used in the construction "mole_fraction_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for sulfuryl fluoride is SO2F2. Sulfuryl fluoride is the IUPAC name. + + 1 - Mole fraction is used in the construction mole_fraction_of_X_in_Y, where X is a material constituent of Y. + "Mole fraction" is used in the construction "mole_fraction_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for toluene is C6H5CH3. Toluene has the same structure as benzene, except that one of the hydrogen atoms is replaced by a methyl group. The IUPAC name for toluene is methylbenzene. + + + + mol mol-1 + + + "Mole fraction" is used in the construction "mole_fraction_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for tribromomethane is CHBr3. The IUPAC name is tribromomethane. @@ -12145,35 +13710,113 @@ totals are summed to obtain the index. 1 - Mole ratio is used in the construction mole_ratio_of_X_to_Y_in_medium, where X and Y are both material constituents of the medium. "Medium" can take anyof the values given in the "Small scale medium" section of the standard name Guidelines document. "ratio_of_X_to_Y" means X/Y. The chemical formula for the nitrate anion is NO3-. The chemical formula of the phosphate anion is PO4 with a charge of minus three. + "Mole ratio" is used in the construction "mole_ratio_of_X_to_Y_in_medium", where X and Y are both material constituents of the medium. "Medium" can take any of the values given in the "medium" section of the standard name Guidelines document. The phrase "ratio_of_X_to_Y" means X/Y. The chemical formula for the nitrate anion is NO3-. The chemical formula of the phosphate anion is PO4 with a charge of minus three. - + mol kg-1 - moles_of_X_per_unit_mass_inY is also called "molality" of X in Y, where X is a material constituent of Y. A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The chemical formula of CFC11 is CFCl3. The IUPAC name fof CFC11 is trichloro-fluoro-methane. + The construction "moles_of_X_per_unit_mass_in_Y" is also called "molality" of X in Y, where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The equivalent term in the NERC P01 Parameter Usage Vocabulary may be found at http://vocab.nerc.ac.uk/collection/P01/current/ATPXZZDZ/2/. 
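The mole-ratio entry above defines "ratio_of_X_to_Y" as simply X/Y, so with both constituents held as cubes in the same units the ratio is plain cube division and the units cancel to "1". A minimal sketch, assuming hypothetical nitrate and phosphate cubes in mol kg-1 loaded from a hypothetical nutrients.nc:

    import iris

    # Hypothetical inputs, both with units of mol kg-1.
    nitrate = iris.load_cube("nutrients.nc", "moles_of_nitrate_per_unit_mass_in_sea_water")
    phosphate = iris.load_cube("nutrients.nc", "moles_of_phosphate_per_unit_mass_in_sea_water")

    # "ratio_of_X_to_Y" means X/Y; mol kg-1 divided by mol kg-1 gives "1".
    ratio = nitrate / phosphate
    ratio.rename("mole_ratio_of_nitrate_to_phosphate_in_sea_water")

The division requires both cubes to share the same grid; Iris derives the dimensionless result units from the operands.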
- + mol kg-1 - moles_of_X_per_unit_mass_inY is also called "molality" of X in Y, where X is a material constituent of Y. + The construction "moles_of_X_per_unit_mass_in_Y" is also called "molality" of X in Y, where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula of ammonium is NH4. The equivalent term in the NERC P01 Parameter Usage Vocabulary may be found at http://vocab.nerc.ac.uk/collection/P01/current/MDMAP004/3/. - + mol kg-1 - moles_of_X_per_unit_mass_inY is also called "molality" of X in Y, where X is a material constituent of Y. + The construction "moles_of_X_per_unit_mass_in_Y" is also called "molality" of X in Y, where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula of CFC11 is CFCl3. The IUPAC name for CFC11 is trichloro(fluoro)methane. - + mol kg-1 - moles_of_X_per_unit_mass_inY is also called "molality" of X in Y, where X is a material constituent of Y. + The construction "moles_of_X_per_unit_mass_in_Y" is also called "molality" of X in Y, where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". "Dissolved inorganic carbon" describes a family of chemical species in solution, including carbon dioxide, carbonic acid and the carbonate and bicarbonate anions. "Dissolved inorganic carbon" is the term used in standard names for all species belonging to the family that are represented within a given model. The list of individual species that are included in a quantity having a group chemical standard name can vary between models. Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute. + + + + mol kg-1 + + + The construction "moles_of_X_per_unit_mass_in_Y" is also called "molality" of X in Y, where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". "Dissolved nitrogen" means the sum of all nitrogen in solution: inorganic nitrogen (nitrite, nitrate and ammonium) plus nitrogen in carbon compounds. + + + + mol kg-1 + + + The construction "moles_of_X_per_unit_mass_in_Y" is also called "molality" of X in Y, where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". "Organic carbon" describes a family of chemical species and is the term used in standard names for all species belonging to the family that are represented within a given model. The list of individual species that are included in a quantity having a group chemical standard name can vary between models. Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute. The equivalent term in the NERC P01 Parameter Usage Vocabulary may be found at http://vocab.nerc.ac.uk/collection/P01/current/CORGZZKG/1/. 
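Because these molality quantities carry units of mol kg-1, converting one to the corresponding mole concentration (mol m-3) is just multiplication by the density of the medium, and Iris's unit algebra tracks this automatically. A minimal sketch, assuming a hypothetical ocean.nc providing both cubes on the same grid:

    import iris

    # Hypothetical inputs: a molality in mol kg-1 and in-situ density in kg m-3.
    molality = iris.load_cube(
        "ocean.nc", "moles_of_dissolved_inorganic_carbon_per_unit_mass_in_sea_water"
    )
    density = iris.load_cube("ocean.nc", "sea_water_density")

    # mol kg-1 multiplied by kg m-3 yields mol m-3.
    concentration = molality * density
    concentration.rename("mole_concentration_of_dissolved_inorganic_carbon_in_sea_water")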
+ + + + mol kg-1 + + + The construction "moles_of_X_per_unit_mass_in_Y" is also called "molality" of X in Y, where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". "Dissolved organic nitrogen" describes the nitrogen held in carbon compounds in solution. These are mostly generated by plankton excretion and decay. The equivalent term in the NERC P01 Parameter Usage Vocabulary may be found at http://vocab.nerc.ac.uk/collection/P01/current/MDMAP008/3/. + + + + mol kg-1 + + + The construction "moles_of_X_per_unit_mass_in_Y" is also called "molality" of X in Y, where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". "Organic phosphorus" means phosphorus in carbon compounds. The equivalent term in the NERC P01 Parameter Usage Vocabulary may be found at http://vocab.nerc.ac.uk/collection/P01/current/ORGPMSZZ/4/. + + + + mol kg-1 + + + The construction "moles_of_X_per_unit_mass_in_Y" is also called "molality" of X in Y, where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". "Phosphorus" means phosphorus in all chemical forms, commonly referred to as "total phosphorus". The equivalent term in the NERC P01 Parameter Usage Vocabulary may be found at +http://vocab.nerc.ac.uk/collection/P01/current/TPHSDSZZ/6/. + + + + mol kg-1 + + + The construction "moles_of_X_per_unit_mass_in_Y" is also called "molality" of X in Y, where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula of guanosine triphosphate is C10H16N5O14P3. + + + + mol kg-1 + + + The construction "moles_of_X_per_unit_mass_in_Y" is also called "molality" of X in Y, where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for hydrogen peroxide is H2O2. + + + + mol kg-1 + + + moles_of_X_per_unit_mass_inY is also called "molality" of X in Y, where X is a material constituent of Y. + + + + mol kg-1 + + + moles_of_X_per_unit_mass_inY is also called "molality" of X in Y, where X is a material constituent of Y. + + + + mol kg-1 + + + moles_of_X_per_unit_mass_inY is also called "molality" of X in Y, where X is a material constituent of Y. + + + + mol kg-1 + + + The construction "moles_of_X_per_unit_mass_in_Y" is also called "molality" of X in Y, where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula for nitrous oxide is N2O. The equivalent term in the NERC P01 Parameter Usage Vocabulary may be found at http://vocab.nerc.ac.uk/collection/P01/current/DN2OZZ01/. @@ -12183,6 +13826,62 @@ totals are summed to obtain the index. moles_of_X_per_unit_mass_inY is also called "molality" of X in Y, where X is a material constituent of Y.
+ + mol kg-1 + + + The construction "moles_of_X_per_unit_mass_in_Y" is also called "molality" of X in Y, where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". Particulate means suspended solids of all sizes. Biogenic silica is a hydrated form of silica (silicon dioxide) with the chemical formula SiO2.nH2O sometimes referred to as opaline silica or opal. It is created by biological processes and in sea water it is predominantly the skeletal material of diatoms. + + + + mol kg-1 + + + The construction "moles_of_X_per_unit_mass_in_Y" is also called "molality" of X in Y, where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". Particulate means suspended solids of all sizes. Particulate inorganic carbon is carbon bound in molecules ionically that may be liberated from the particles as carbon dioxide by acidification. + + + + mol kg-1 + + + The construction "moles_of_X_per_unit_mass_in_Y" is also called "molality" of X in Y, where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. Particulate means suspended solids of all sizes. The equivalent term in the NERC P01 Parameter Usage Vocabulary may be found at http://vocab.nerc.ac.uk/collection/P01/current/MDMAP011/4/. + + + + mol kg-1 + + + The construction "moles_of_X_per_unit_mass_in_Y" is also called "molality" of X in Y, where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. Particulate means suspended solids of all sizes. The equivalent term in the NERC P01 Parameter Usage Vocabulary may be found at http://vocab.nerc.ac.uk/collection/P01/current/MDMAP013/4/. + + + + mol kg-1 + + + The construction "moles_of_X_per_unit_mass_in_Y" is also called "molality" of X in Y, where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. Particulate means suspended solids of all sizes. Phosphorus means phosphorus in all chemical forms, commonly referred to as "total phosphorus". The equivalent term in the NERC P01 Parameter Usage Vocabulary may be found at http://vocab.nerc.ac.uk/collection/P01/current/TPHSVLPT/5/. 
+ + + + mol kg-1 + + + The construction "moles_of_X_per_unit_mass_in_Y" is also called "molality" of X in Y, where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. Particulate means suspended solids of all sizes. + + + + mol kg-1 + + + The construction "moles_of_X_per_unit_mass_in_Y" is also called "molality" of X in Y, where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. Particulate means suspended solids of all sizes. + + + + mol kg-1 + + + The construction "moles_of_X_per_unit_mass_in_Y" is also called "molality" of X in Y, where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. Particulate means suspended solids of all sizes. Phosphorus means phosphorus in all chemical forms, commonly referred to as "total phosphorus". The equivalent term in the NERC P01 Parameter Usage Vocabulary may be found at http://vocab.nerc.ac.uk/collection/P01/current/OPHSVLPT/6/. + + mol kg-1 @@ -12197,6 +13896,27 @@ totals are summed to obtain the index. moles_of_X_per_unit_mass_inY is also called "molality" of X in Y, where X is a material constituent of Y. + + mol kg-1 + + + The construction "moles_of_X_per_unit_mass_in_Y" is also called "molality" of X in Y, where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The chemical formula of sulfur hexafluoride is SF6. + + + + 1 + + + A quality flag that reports the result of the Multi-variate test, which checks that values are reasonable when compared with related variables. The linkage between the data variable and this variable is achieved using the ancillary_variables attribute. There are standard names for other specific quality tests which take the form of X_quality_flag. Quality information that does not match any of the specific quantities should be given the more general standard name of quality_flag. + + + + 1 + + + A quality flag that reports the result of the Neighbor test, which checks that values are reasonable when compared with nearby measurements. The linkage between the data variable and this variable is achieved using the ancillary_variables attribute. There are standard names for other specific quality tests which take the form of X_quality_flag. 
Quality information that does not match any of the specific quantities should be given the more general standard name of quality_flag. + + W m-2 @@ -12222,7 +13942,7 @@ totals are summed to obtain the index. W m-2 - "Downward" indicates a vector component which is positive when directed downward (negative upward). Net downward radiation is the difference between radiation from above (downwelling) and radiation from below (upwelling). In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. The surface called "surface" means the lower boundary of the atmosphere. "shortwave" means shortwave radiation. + "Downward" indicates a vector component which is positive when directed downward (negative upward). Net downward radiation is the difference between radiation from above (downwelling) and radiation from below (upwelling). In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. The term "shortwave" means shortwave radiation. The phrase "sea water surface" means the upper boundary of the liquid portion of an ocean or sea, including the boundary to floating ice if present. @@ -12243,56 +13963,56 @@ totals are summed to obtain the index. mol m-2 s-1 - "Production of carbon" means the production of biomass expressed as the mass of carbon which it contains. Net primary production is the excess of gross primary production (rate of synthesis of biomass from inorganic precursors) by autotrophs ("producers"), for example, photosynthesis in plants or phytoplankton, over the rate at which the autotrophs themselves respire some of this biomass. "Productivity" means production per unit area. Phytoplankton are autotrophic prokaryotic or eukaryotic algae that live near the water surface where there is sufficient light to support photosynthesis. "Calcareous phytoplankton" are phytoplankton that produce calcite. The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. Calcite is a mineral that is a polymorph of calcium carbonate. The chemical formula of calcite is CaCO3. Standard names also exist for aragonite, another polymorph of calcium carbonate. + "Production of carbon" means the production of biomass expressed as the mass of carbon which it contains. Net primary production is the excess of gross primary production (rate of synthesis of biomass from inorganic precursors) by autotrophs ("producers"), for example, photosynthesis in plants or phytoplankton, over the rate at which the autotrophs themselves respire some of this biomass. "Productivity" means production per unit area. The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. Phytoplankton are algae that grow where there is sufficient light to support photosynthesis. "Calcareous phytoplankton" are phytoplankton that produce calcite. Calcite is a mineral that is a polymorph of calcium carbonate. The chemical formula of calcite is CaCO3. Standard names also exist for aragonite, another polymorph of calcium carbonate. 
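The X_quality_flag entries earlier in this hunk rely on the CF ancillary_variables mechanism: the flag variable is attached to the data variable it qualifies, and the link is written out as the ancillary_variables attribute. A minimal sketch of building that linkage with Iris's ancillary-variable support; the data values and one-dimensional shape are invented for illustration, and the flag's standard name must already be known to Iris's standard-name table:

    import numpy as np
    import iris.coords
    from iris.cube import Cube

    # A toy data variable; the values are invented.
    data = Cube(np.array([271.1, 272.3, 289.9]), standard_name="air_temperature", units="K")

    # A matching flag variable, following the X_quality_flag naming pattern
    # described above (here: the neighbor test).
    flag = iris.coords.AncillaryVariable(
        np.array([0, 0, 1]),  # e.g. 1 marks a value failing the test
        standard_name="neighbor_test_quality_flag",
        units="1",
    )
    data.add_ancillary_variable(flag, data_dims=0)
    # Saving "data" to netCDF records the link via the ancillary_variables attribute.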
mol m-2 s-1 - "Production of carbon" means the production of biomass expressed as the mass of carbon which it contains. Net primary production is the excess of gross primary production (rate of synthesis of biomass from inorganic precursors) by autotrophs ("producers"), for example, photosynthesis in plants or phytoplankton, over the rate at which the autotrophs themselves respire some of this biomass. "Productivity" means production per unit area. Diatoms are single-celled phytoplankton with an external skeleton made of silica. Phytoplankton are autotrophic prokaryotic or eukaryotic algae that live near the water surface where there is sufficient light to support photosynthesis. The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. + "Production of carbon" means the production of biomass expressed as the mass of carbon which it contains. Net primary production is the excess of gross primary production (rate of synthesis of biomass from inorganic precursors) by autotrophs ("producers"), for example, photosynthesis in plants or phytoplankton, over the rate at which the autotrophs themselves respire some of this biomass. "Productivity" means production per unit area. The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. Diatoms are single-celled phytoplankton with an external skeleton made of silica. Phytoplankton are algae that grow where there is sufficient light to support photosynthesis. - + mol m-2 s-1 - "Production of carbon" means the production of biomass expressed as the mass of carbon which it contains. Net primary production is the excess of gross primary production (rate of synthesis of biomass from inorganic precursors) by autotrophs ("producers"), for example, photosynthesis in plants or phytoplankton, over the rate at which the autotrophs themselves respire some of this biomass. "Productivity" means production per unit area. In ocean modelling, diazotrophs are phytoplankton of the phylum cyanobacteria distinct from other phytoplankton groups in their ability to fix nitrogen gas in addition to nitrate and ammonium. Phytoplankton are autotrophic prokaryotic or eukaryotic algae that live near the water surface where there is sufficient light to support photosynthesis. The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. + "Production of carbon" means the production of biomass expressed as the mass of carbon which it contains. Net primary production is the excess of gross primary production (rate of synthesis of biomass from inorganic precursors) by autotrophs ("producers"), for example, photosynthesis in plants or phytoplankton, over the rate at which the autotrophs themselves respire some of this biomass. "Productivity" means production per unit area. The phrase "expressed_as" is used in the construction "A_expressed_as_B", where B is a chemical constituent of A. 
It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. Phytoplankton are algae that grow where there is sufficient light to support photosynthesis. Diazotrophic phytoplankton are phytoplankton (predominantly from Phylum Cyanobacteria) that are able to fix molecular nitrogen (gas or solute) in addition to nitrate and ammonium. mol m-2 s-1 - "Production of carbon" means the production of biomass expressed as the mass of carbon which it contains. Net primary production is the excess of gross primary production (rate of synthesis of biomass from inorganic precursors) by autotrophs ("producers"), for example, photosynthesis in plants or phytoplankton, over the rate at which the autotrophs themselves respire some of this biomass. "Productivity" means production per unit area. "Miscellaneous phytoplankton" are all those phytoplankton that are not diatoms, diazotrophs, calcareous phytoplankton, picophytoplankton or other seperately named components of the phytoplankton population. Phytoplankton are autotrophic prokaryotic or eukaryotic algae that live near the water surface where there is sufficient light to support photosynthesis. The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. + "Production of carbon" means the production of biomass expressed as the mass of carbon which it contains. Net primary production is the excess of gross primary production (rate of synthesis of biomass from inorganic precursors) by autotrophs ("producers"), for example, photosynthesis in plants or phytoplankton, over the rate at which the autotrophs themselves respire some of this biomass. "Productivity" means production per unit area. The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. "Miscellaneous phytoplankton" are all those phytoplankton that are not diatoms, diazotrophs, calcareous phytoplankton, picophytoplankton or other separately named components of the phytoplankton population. Phytoplankton are algae that grow where there is sufficient light to support photosynthesis. mol m-2 s-1 - "Production of carbon" means the production of biomass expressed as the mass of carbon which it contains. Net primary production is the excess of gross primary production (rate of synthesis of biomass from inorganic precursors) by autotrophs ("producers"), for example, photosynthesis in plants or phytoplankton, over the rate at which the autotrophs themselves respire some of this biomass. "Productivity" means production per unit area. Phytoplankton are autotrophic prokaryotic or eukaryotic algae that live near the water surface where there is sufficient light to support photosynthesis. The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. + "Production of carbon" means the production of biomass expressed as the mass of carbon which it contains. 
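The "productivity" wording in these entries encodes a dimensional convention: production per unit volume (for example mol m-3 s-1) becomes productivity per unit area (mol m-2 s-1) once it is integrated over depth. A minimal sketch of that vertical integral, assuming a hypothetical one-dimensional profile cube with a bounded depth coordinate in metres:

    import iris
    import iris.analysis
    from cf_units import Unit

    # Hypothetical profile of carbon production, units mol m-3 s-1, on a
    # bounded "depth" coordinate (metres).
    production = iris.load_cube("bgc_profile.nc")

    depth = production.coord("depth")
    thickness = depth.bounds[:, 1] - depth.bounds[:, 0]  # layer thickness, m

    # Thickness-weighted sum over depth approximates the vertical integral.
    productivity = production.collapsed("depth", iris.analysis.SUM, weights=thickness)
    productivity.units = production.units * Unit("m")  # mol m-3 s-1 -> mol m-2 s-1

Whether the weighted sum is an adequate integral depends on the vertical discretisation; the sketch assumes contiguous layer bounds, and the units are updated by hand because Iris does not fold the weights into the unit algebra.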
Net primary production is the excess of gross primary production (rate of synthesis of biomass from inorganic precursors) by autotrophs ("producers"), for example, photosynthesis in plants or phytoplankton, over the rate at which the autotrophs themselves respire some of this biomass. "Productivity" means production per unit area. The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. Phytoplankton are algae that grow where there is sufficient light to support photosynthesis. mol m-2 s-1 - "Production of carbon" means the production of biomass expressed as the mass of carbon which it contains. Net primary production is the excess of gross primary production (rate of synthesis of biomass from inorganic precursors) by autotrophs ("producers"), for example, photosynthesis in plants or phytoplankton, over the rate at which the autotrophs themselves respire some of this biomass. "Productivity" means production per unit area. Picophytoplankton are phytoplankton of less than 2 micrometers in size. Phytoplankton are autotrophic prokaryotic or eukaryotic algae that live near the water surface where there is sufficient light to support photosynthesis. The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. + "Production of carbon" means the production of biomass expressed as the mass of carbon which it contains. Net primary production is the excess of gross primary production (rate of synthesis of biomass from inorganic precursors) by autotrophs ("producers"), for example, photosynthesis in plants or phytoplankton, over the rate at which the autotrophs themselves respire some of this biomass. "Productivity" means production per unit area. The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. Picophytoplankton are phytoplankton of less than 2 micrometers in size. Phytoplankton are algae that grow where there is sufficient light to support photosynthesis. mol m-2 s-1 - "Production of carbon" refers to the production of biomass expressed as the mass of carbon which it contains. Net primary production is the excess of gross primary production (rate of synthesis of biomass from inorganic precursors) by autotrophs ("producers"), for example, photosynthesis in plants or phytoplankton, over the rate at which the autotrophs themselves respire some of this biomass. "Productivity" means production per unit area. "Nitrate utilization" means net primary production by phytoplankton based on nitrate alone. Phytoplankton are autotrophic prokaryotic or eukaryotic algae that live near the water surface where there is sufficient light to support photosynthesis. The chemical formula for the nitrate anion is NO3-. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. 
The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. + "Production of carbon" refers to the production of biomass expressed as the mass of carbon which it contains. Net primary production is the excess of gross primary production (rate of synthesis of biomass from inorganic precursors) by autotrophs ("producers"), for example, photosynthesis in plants or phytoplankton, over the rate at which the autotrophs themselves respire some of this biomass. "Productivity" means production per unit area. The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Nitrate utilization" means net primary production by phytoplankton based on nitrate alone. Phytoplankton are algae that grow where there is sufficient light to support photosynthesis. The chemical formula for the nitrate anion is NO3-. kg m-3 s-1 - Net primary production is the excess of gross primary production (rate of synthesis of biomass from inorganic precursors) by autotrophs ("producers"), for example, photosynthesis in plants or phytoplankton, over the rate at which the autotrophs themselves respire some of this biomass. In the oceans, carbon production per unit volume is often found at a number of depths at a given horizontal location. That quantity can then be integrated to calculate production per unit area at the location. Standard names for production per unit area use the term "productivity". The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constiuents of A. + Net primary production is the excess of gross primary production (rate of synthesis of biomass from inorganic precursors) by autotrophs ("producers"), for example, photosynthesis in plants or phytoplankton, over the rate at which the autotrophs themselves respire some of this biomass. In the oceans, carbon production per unit volume is often found at a number of depths at a given horizontal location. That quantity can then be integrated to calculate production per unit area at the location. Standard names for production per unit area use the term "productivity". The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. @@ -12306,7 +14026,7 @@ totals are summed to obtain the index. kg m-2 s-1 - "Production of carbon" means the production of biomass expressed as the mass of carbon which it contains. 
Net primary production is the excess of gross primary production (rate of synthesis of biomass from inorganic precursors) by autotrophs ("producers"), for example, photosynthesis in plants or phytoplankton, over the rate at which the autotrophs themselves respire some of this biomass. "Productivity" means production per unit area. The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. + "Production of carbon" means the production of biomass expressed as the mass of carbon which it contains. Net primary production is the excess of gross primary production (rate of synthesis of biomass from inorganic precursors) by autotrophs ("producers"), for example, photosynthesis in plants or phytoplankton, over the rate at which the autotrophs themselves respire some of this biomass. "Productivity" means production per unit area. The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. @@ -12386,6 +14106,41 @@ totals are summed to obtain the index. A phrase assuming_condition indicates that the named quantity is the value which would obtain if all aspects of the system were unaltered except for the assumption of the circumstances specified by the condition. "shortwave" means shortwave radiation. "Upward" indicates a vector component which is positive when directed upward (negative downward). Net upward radiation is the difference between radiation from below (upwelling) and radiation from above (downwelling). In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. + + % + + + 1000 hour fuel moisture (FM1000) represents the modelled moisture content in the dead fuels in the 3 to 8 inch diameter class and the layer of the forest floor about 4 inches below the surface. The value is based on a running 7-day average. The 1000-hour time lag fuel moisture is a function of length of day (as influenced by latitude and calendar date), daily temperature and relative humidity extremes (maximum and minimum values) and the 24-hour precipitation duration values for a 7-day period. It is a component in the US National Fire Danger Rating System. The US National Fire Danger Rating System comprises several numeric indexes that rate the potential over a large area for wildland fires to ignite, spread, and require action to suppress or manage. It was designed for use in the continental United States, and all its components are relative, not absolute. + + + + % + + + 100 hour fuel moisture (FM100) represents the modeled moisture content of dead fuels in the 1 to 3 inch diameter class. It can also be used as a very rough estimate of the average moisture content of the forest floor from three-fourths inch to 4 inches below the surface. The 100-hour timelag fuel moisture is a function of length of day (as influenced by latitude and calendar date), maximum and minimum temperature and relative humidity, and precipitation duration in the previous 24 hours. It is a component in the US National Fire Danger Rating System. 
The US National Fire Danger Rating System comprises several numeric indexes that rate the potential over a large area for wildland fires to ignite, spread, and require action to suppress or manage. It was designed for use in the continental United States, and all its components are relative, not absolute. + + + + 1 + + + The Burning Index (BI) is a numeric value closely related to the flame length in feet multiplied by 10, which is related to the contribution of fire behaviour to the effort of containing a fire. The BI is a function of fire spread and fire intensity and is derived from a combination of Spread and Energy Release Components. The Spread Component is a rating of the forward rate of spread of a head fire and wind is a key input. The scale is open ended which allows the range of numbers to adequately define fire problems, even in time of low to moderate fire danger. Computed BI values represent the near upper limit to be expected on the rating area. In other words, if a fire occurs in the worst fuel, weather and topography conditions of the rating area, these numbers indicate its expected fire line intensities and flame length. It is an index in the US National Fire Danger Rating System. The US National Fire Danger Rating System comprises several numeric indexes that rate the potential over a large area for wildland fires to ignite, spread, and require action to suppress or manage. It was designed for use in the continental United States, and all its components are relative, not absolute. + + + + J m-2 + + + The Energy Release Component (ERC) is a number related to the available energy per unit area within the flaming front at the head of a fire. It is usually given in BTU ft-2. Daily variations in ERC are due to changes in moisture content of the various fuels present, both live and dead. It may also be considered a composite fuel moisture value as it reflects the contribution that all live and dead fuels have to potential fire intensity. Energy Release Component is a cumulative index. The scale is open-ended and relative. Energy Release Component values depend on the fuel model input into the calculations and interpretation of precise values varies with ecology and region. It is an index in the US National Fire Danger Rating System. The US National Fire Danger Rating System comprises several numeric indexes that rate the potential over a large area for wildland fires to ignite, spread, and require action to suppress or manage. It was designed for use in the continental United States, and all its components are relative, not absolute. + + + + 1 + + + Severe Fire Danger Index (SFDI) is the normalized product of normalized Energy Release Component (ERC) and normalized Burning Index (BI) from the United States National Fire Danger Rating System (NFDRS). While SFDI is not officially part of the National Fire Danger Rating System, it is related to and intended to supplement NFDRS. It is commonly categorized into five classes based on percentile: low (0-60), moderate (60-80), high (80-90), very high (90-97), and extreme (97-100). It can be extended to future conditions by introducing an unprecedented category for values above the historical 100th percentile. As it is locally normalized, its interpretation remains the same across space. + + 1 @@ -12400,11 +14155,11 @@ totals are summed to obtain the index. Diatoms are phytoplankton with an external skeleton made of silica. Phytoplankton are algae that grow where there is sufficient light to support photosynthesis. 
"Nitrogen growth limitation" means the ratio of the growth rate of a species population in the environment (where there is a finite availability of nitrogen) to the theoretical growth rate if there were no such limit on nitrogen availability. - + 1 - In ocean modelling, diazotrophs are phytoplankton of the phylum cyanobacteria distinct from other phytoplankton groups in their ability to fix nitrogen gas in addition to nitrate and ammonium. Phytoplankton are algae that grow where there is sufficient light to support photosynthesis. "Nitrogen growth limitation" means the ratio of the growth rate of a species population in the environment (where there is a finite availability of nitrogen) to the theoretical growth rate if there were no such limit on nitrogen availability. + "Nitrogen growth limitation" means the ratio of the growth rate of a biological population in the environment (where there is a finite availability of nitrogen) to the theoretical growth rate if there were no such limit on nitrogen availability. Phytoplankton are algae that grow where there is sufficient light to support photosynthesis. Diazotrophic phytoplankton are phytoplankton (predominantly from Phylum Cyanobacteria) that are able to fix molecular nitrogen (gas or solute) in addition to nitrate and ammonium. @@ -12456,6 +14211,13 @@ totals are summed to obtain the index. In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. "Vegetation" means any living plants e.g. trees, shrubs, grass. "Litter" is dead plant material in or above the soil. + + kg s-1 + + + The amount of total nitrogen mass transported in the river channels from land into the ocean. This quantity can be provided at a certain location within the river network and floodplain (over land) or at the river mouth (over ocean) where the river enters the ocean. "River" refers to water in the fluvial system (stream and floodplain). + + m @@ -12470,6 +14232,13 @@ totals are summed to obtain the index. "Normalized_difference_vegetation_index", usually abbreviated to NDVI, is an index calculated from reflectances measured in the visible and near infrared channels. It is calculated as NDVI = (NIR - R) / (NIR + R) where NIR is the reflectance in the near-infrared band and R is the reflectance in the red visible band. Reflectance is the ratio of the reflected over the incoming radiation in each spectral band. The calculated value of NDVI depends on the precise definitions of the spectral bands and these definitions may vary between different models and remote sensing instruments. + + m s-1 + + + The northward motion of air, relative to near-surface northward current; calculated as northward_wind minus northward_sea_water_velocity. A vertical coordinate variable or scalar coordinate with standard name "depth" should be used to indicate the depth of sea water velocity used in the calculation. Similarly, a vertical coordinate variable or scalar coordinate with standard name "height" should be used to indicate the height of the the wind component. A velocity is a vector quantity. "Northward" indicates a vector component which is positive when directed northward (negative southward). + + W m-1 @@ -12485,7 +14254,7 @@ totals are summed to obtain the index. - kg s-1 m-1 + kg m-1 s-1 "Water" means water in all phases. "Northward" indicates a vector component which is positive when directed northward (negative southward). 
Transport across_unit_distance means expressed per unit distance normal to the direction of transport. @@ -12505,6 +14274,27 @@ totals are summed to obtain the index. A velocity is a vector quantity. "Eastward" indicates a vector component which is positive when directed eastward (negative westward). "Northward" indicates a vector component which is positive when directed northward (negative southward). Sea ice velocity is defined as a two-dimensional vector, with no vertical component. The phrase "component_derivative_of_X" means derivative of X with respect to distance in the component direction, which may be northward, southward, eastward, westward, x or y. The last two indicate derivatives along the axes of the grid, in the case where they are not true longitude and latitude. The named quantity is a component of the strain rate tensor for sea ice. "Sea ice" means all ice floating in the sea which has formed from freezing sea water, rather than by other processes such as calving of land ice to form icebergs. + + s-1 + + + The quantity with standard name northward_derivative_of_eastward_wind is the derivative of the eastward component of the wind with respect to distance in the northward direction for a given atmospheric level. The phrase "component_derivative_of_X" means derivative of X with respect to distance in the component direction, which may be "northward", "southward", "eastward", "westward", "upward", "downward", "x" or "y". The last two indicate derivatives along the axes of the grid, in the case where they are not true longitude and latitude. A positive value indicates that X is increasing with distance along the positive direction of the axis. Wind is defined as a two-dimensional (horizontal) air velocity vector, with no vertical component. (Vertical motion in the atmosphere has the standard name "upward_air_velocity"). + + + + s-1 + + + The quantity with standard name northward_derivative_of_northward_wind is the derivative of the northward component of wind with respect to distance in the northward direction for a given atmospheric level. The phrase "component_derivative_of_X" means derivative of X with respect to distance in the component direction, which may be "northward", "southward", "eastward", "westward", "upward", "downward", "x" or "y". The last two indicate derivatives along the axes of the grid, in the case where they are not true longitude and latitude. A positive value indicates that X is increasing with distance along the positive direction of the axis. Wind is defined as a two-dimensional (horizontal) air velocity vector, with no vertical component. (Vertical motion in the atmosphere has the standard name "upward_air_velocity"). + + + + degree m-1 + + + The quantity with standard name northward_derivative_of_wind_from_direction is the derivative of wind from_direction with respect to the change in northward lateral position for a given atmospheric level. The phrase "component_derivative_of_X" means derivative of X with respect to distance in the component direction, which may be "northward", "southward", "eastward", "westward", "upward", "downward", "x" or "y". The last two indicate derivatives along the axes of the grid, in the case where they are not true longitude and latitude. A positive value indicates that X is increasing with distance along the positive direction of the axis. The phrase "from_direction" is used in the construction X_from_direction and indicates the direction from which the velocity vector of X is coming. 
The direction is a bearing in the usual geographical sense, measured positive clockwise from due north. In meteorological reports, the direction of the wind vector is usually (but not always) given as the direction from which it is blowing ("wind_from_direction") (westerly, northerly, etc.). In other contexts, such as atmospheric modelling, it is often natural to give the direction in the usual manner of vectors as the heading or the direction to which it is blowing ("wind_to_direction") (eastward, southward, etc.). Wind is defined as a two-dimensional (horizontal) air velocity vector, with no vertical component. (Vertical motion in the atmosphere has the standard name "upward_air_velocity"). + + m3 s-2 @@ -12519,6 +14309,13 @@ totals are summed to obtain the index. A velocity is a vector quantity. "Northward" indicates a vector component which is positive when directed northward (negative southward). Flood water is water that covers land which is normally not covered by water. + + m s-1 + + + A velocity is a vector quantity. "Northward" indicates a vector component which is positive when directed northward (negative southward). Friction velocity is a reference wind velocity derived from the relationship between air density and downward stress and is usually applied at a level close to the surface where stress is assumed to be independent of height and approximately proportional to the square of mean velocity. + + W m-2 @@ -12642,7 +14439,7 @@ totals are summed to obtain the index. W - "Northward" indicates a vector component which is positive when directed northward (negative southward). The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. Parameterized eddy advection in an ocean model means the part due to a scheme representing parameterized eddy-induced advective effects not included in the resolved model velocity field. Parameterized submesoscale eddy advection occurs on a spatial scale of the order of 1 km horizontally. Reference: James C. McWilliams 2016, Submesoscale currents in the ocean, Proceedings of the Royal Society A: Mathematical, Physical and Engineering Sciences, volume 472, issue 2189. DOI: 10.1098/rspa.2016.0117. There are also standard names for parameterized_mesoscale_eddy_advection which, along with parameterized_submesoscale_eddy_advection, contributes to the total parameterized eddy advection. + "Northward" indicates a vector component which is positive when directed northward (negative southward). The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. Parameterized eddy advection in an ocean model means the part due to a scheme representing parameterized eddy-induced advective effects not included in the resolved model velocity field. Parameterized submesoscale eddy advection occurs on a spatial scale of the order of 1 km horizontally. Reference: James C. McWilliams 2016, Submesoscale currents in the ocean, Proceedings of the Royal Society A: Mathematical, Physical and Engineering Sciences, volume 472, issue 2189. DOI: 10.1098/rspa.2016.0117. There are also standard names for parameterized_mesoscale_eddy_advection which, along with parameterized_submesoscale_eddy_advection, contributes to the total parameterized eddy advection.
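The northward_derivative entries above define derivatives with respect to true northward distance rather than latitude index. As a minimal sketch (not part of the table itself), assuming an eastward wind field on a regular latitude-longitude grid, such a derivative could be approximated with a finite difference, converting latitude spacing to metres via the Earth's radius; the function name and the spherical-Earth radius are illustrative assumptions:

```python
import numpy as np

EARTH_RADIUS = 6.371e6  # metres; assumes a spherical Earth

def northward_derivative(field, lat_deg):
    """Finite-difference derivative of `field` with respect to
    northward distance (per metre) on a regular lat-lon grid.

    field   : 2-D array indexed (lat, lon)
    lat_deg : 1-D array of latitudes in degrees
    """
    # Northward coordinate in metres: y = R * phi, with phi in radians.
    y = EARTH_RADIUS * np.deg2rad(lat_deg)
    # Differentiate along the latitude axis (axis 0).
    return np.gradient(field, y, axis=0)

# e.g. a northward_derivative_of_eastward_wind field:
# dudy = northward_derivative(u, lat)
```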
@@ -12708,6 +14505,20 @@ totals are summed to obtain the index. A velocity is a vector quantity. "Northward" indicates a vector component which is positive when directed northward (negative southward). A phrase assuming_condition indicates that the named quantity is the value which would obtain if all aspects of the system were unaltered except for the assumption of the circumstances specified by the condition. + + m s-1 + + + A velocity is a vector quantity. "Northward" indicates a vector component which is positive when directed northward (negative southward). The velocity at the sea floor is that adjacent to the ocean bottom, which would be the deepest grid cell in an ocean model and within the benthic boundary layer for measurements. + + + + m s-1 + + + A velocity is a vector quantity. "Northward" indicates a vector component which is positive when directed northward (negative southward). The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. + + m s-1 @@ -12715,6 +14526,13 @@ totals are summed to obtain the index. "Northward" indicates a vector component which is positive when directed northward (negative southward). The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. Parameterized mesoscale eddies occur on a spatial scale of many tens of kilometres and an evolutionary time of weeks. Reference: James C. McWilliams 2016, Submesoscale currents in the ocean, Proceedings of the Royal Society A: Mathematical, Physical and Engineering Sciences, volume 472, issue 2189. DOI: 10.1098/rspa.2016.0117. Parameterized mesoscale eddies are represented in ocean models using schemes such as the Gent-McWilliams scheme. + + m s-1 + + + A velocity is a vector quantity. "Northward" indicates a vector component which is positive when directed northward (negative southward). The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Due to tides" means due to all astronomical gravity changes which manifest as tides. No distinction is made between different tidal components. + + m s-1 @@ -12757,13 +14575,6 @@ totals are summed to obtain the index. "Northward" indicates a vector component which is positive when directed northward (negative southward). Wind is defined as a two-dimensional (horizontal) air velocity vector, with no vertical component. (Vertical motion in the atmosphere has the standard name upward_air_velocity.) - - s-1 - 46 - - "Northward" indicates a vector component which is positive when directed northward (negative southward). Wind is defined as a two-dimensional (horizontal) air velocity vector, with no vertical component. (Vertical motion in the atmosphere has the standard name upward_air_velocity.) Wind shear is the derivative of wind with respect to height. - - kg m-2 @@ -12785,6 +14596,13 @@ totals are summed to obtain the index. "Number concentration" means the number of particles or other specified objects per unit volume. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. "stp" means standard temperature (0 degC) and pressure (101325 Pa). 
The surface called "surface" means the lower boundary of the atmosphere. + + m-3 + + + "Number concentration" means the number of particles or other specified objects per unit volume. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. + + m-3 @@ -12792,6 +14610,20 @@ totals are summed to obtain the index. "Number concentration" means the number of particles or other specified objects per unit volume. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. "Ambient_aerosol" means that the aerosol is measured or modelled at the ambient state of pressure, temperature and relative humidity that exists in its immediate environment. "Ambient aerosol particles" are aerosol particles that have taken up ambient water through hygroscopic growth. The extent of hygroscopic growth depends on the relative humidity and the composition of the particles. + + m-3 + + + "Number concentration" means the number of particles or other specified objects per unit volume. "Biological taxon" is a name or other label identifying an organism or a group of organisms as belonging to a unit of classification in a hierarchical taxonomy. There must be an auxiliary coordinate variable with standard name biological_taxon_name to identify the taxon in human readable format and optionally an auxiliary coordinate variable with standard name biological_taxon_lsid to provide a machine-readable identifier. See Section 6.1.2 of the CF convention (version 1.8 or later) for information about biological taxon auxiliary coordinate variables. + + + + m-3 + + + "Number concentration" means the number of particles or other specified objects per unit volume. "Pollen grain" refers to the male gametophyte of seed plants (either angiosperms or gymnosperms). The number concentration of pollen grains refers to the number of individual pollen grains per unit volume. "Biological taxon" is a name or other label identifying an organism or a group of organisms as belonging to a unit of classification in a hierarchical taxonomy. There must be an auxiliary coordinate variable with standard name biological_taxon_name to identify the taxon in human readable format and optionally an auxiliary coordinate variable with standard name biological_taxon_identifier to provide a machine-readable identifier. See Section 6.1.2 of the CF convention (version 1.8 or later) for information about biological taxon auxiliary coordinate variables. + + m-3 @@ -12799,18 +14631,25 @@ totals are summed to obtain the index. The cloud condensation nuclei number concentration is the total number of aerosol particles per unit volume independent of and integrated over particle size that act as condensation nuclei for liquid-phase clouds. A coordinate variable with the standard name of relative_humidity should be specified to indicate that the property refers to a specific supersaturation with respect to liquid water. The ability of a particle to act as a condensation nucleus is determined by its size, chemical composition, and morphology. "stp" means standard temperature (0 degC) and pressure (101325 Pa). + + m-3 + + + "Number concentration" means the number of particles or other specified objects per unit volume. 
The cloud condensation nuclei number concentration is the total number of aerosol particles per unit volume independent of and integrated over particle size that act as condensation nuclei for liquid-phase clouds. A coordinate variable with the standard name of relative_humidity should be specified to indicate that the property refers to a specific supersaturation with respect to liquid water. The ability of a particle to act as a condensation nucleus is determined by its size, chemical composition, and morphology. + + m-3 - "Number concentration" means the number of particles or other specified objects per unit volume. + "Number concentration" means the number of particles or other specified objects per unit volume. "Cloud liquid water" refers to the liquid phase of cloud water. A diameter of 0.2 mm has been suggested as an upper limit to the size of drops that shall be regarded as cloud drops; larger drops fall rapidly enough so that only very strong updrafts can sustain them. Any such division is somewhat arbitrary, and active cumulus clouds sometimes contain cloud drops much larger than this. Reference: AMS Glossary http://glossary.ametsoc.org/wiki/Cloud_drop. m-3 - "Number concentration" means the number of particles or other specified objects per unit volume. cloud_top refers to the top of the highest cloud. + "Number concentration" means the number of particles or other specified objects per unit volume. "Cloud liquid water" refers to the liquid phase of cloud water. A diameter of 0.2 mm has been suggested as an upper limit to the size of drops that shall be regarded as cloud drops; larger drops fall rapidly enough so that only very strong updrafts can sustain them. Any such division is somewhat arbitrary, and active cumulus clouds sometimes contain cloud drops much larger than this. Reference: AMS Glossary http://glossary.ametsoc.org/wiki/Cloud_drop. cloud_top refers to the top of the highest cloud. @@ -12820,11 +14659,11 @@ totals are summed to obtain the index. "Number concentration" means the number of particles or other specified objects per unit volume. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. "Ambient_aerosol" means that the aerosol is measured or modelled at the ambient state of pressure, temperature and relative humidity that exists in its immediate environment. "Ambient aerosol particles" are aerosol particles that have taken up ambient water through hygroscopic growth. The extent of hygroscopic growth depends on the relative humidity and the composition of the particles. Coarse mode aerosol particles have a diameter of more than 1 micrometer. - + m-3 - "Number concentration" means the number of particles or other specified objects per unit volume.The phrase "convective_liquid_water_cloud_top" refers to the top of the highest convective liquid water cloud. Convective cloud is that produced by the convection schemes in an atmosphere model. + "Number concentration" means the number of particles or other specified objects per unit volume. The phrase "convective_liquid_water_cloud_top" refers to the top of the highest convective liquid water cloud. Convective cloud is that produced by the convection schemes in an atmosphere model. "Cloud liquid water" refers to the liquid phase of cloud water. 
A diameter of 0.2 mm has been suggested as an upper limit to the size of drops that shall be regarded as cloud drops; larger drops fall rapidly enough so that only very strong updrafts can sustain them. Any such division is somewhat arbitrary, and active cumulus clouds sometimes contain cloud drops much larger than this. Reference: AMS Glossary http://glossary.ametsoc.org/wiki/Cloud_drop. @@ -12869,32 +14708,32 @@ totals are summed to obtain the index. "Number concentration" means the number of particles or other specified objects per unit volume. "Pm2p5 aerosol" means atmospheric particulate compounds with an aerodynamic diameter of less than or equal to 2.5 micrometers. To specify the relative humidity and temperature at which the particle size applies, provide scalar coordinate variables with the standard names of, respectively, "relative_humidity" and "air_temperature". - + m-3 - "Number concentration" means the number of particles or other specified objects per unit volume. The phrase "stratiform_liquid_water_cloud_top" refers to the top of the highest stratiform liquid water cloud. In an atmosphere model, stratiform cloud is that produced by large-scale convergence (not the convection schemes). + "Number concentration" means the number of particles or other specified objects per unit volume. The phrase "stratiform_liquid_water_cloud_top" refers to the top of the highest stratiform liquid water cloud. In an atmosphere model, stratiform cloud is that produced by large-scale convergence (not the convection schemes). "Cloud liquid water" refers to the liquid phase of cloud water. A diameter of 0.2 mm has been suggested as an upper limit to the size of drops that shall be regarded as cloud drops; larger drops fall rapidly enough so that only very strong updrafts can sustain them. Any such division is somewhat arbitrary, and active cumulus clouds sometimes contain cloud drops much larger than this. Reference: AMS Glossary http://glossary.ametsoc.org/wiki/Cloud_drop. 1 - Air temperature is the bulk temperature of the air, not the surface (skin) temperature. A variable whose standard name has the form number_of_days_with_X_below|above_threshold is a count of the number of days on which the condition X_below|above_threshold is satisfied. It must have a coordinate variable or scalar coordinate variable with the a standard name of X to supply the threshold(s). It must have a climatological time variable, and a cell_methods entry for within days which describes the processing of quantity X before the threshold is applied. A number_of_days is an extensive quantity in time, and the cell_methods entry for over days should be "sum". + Air temperature is the bulk temperature of the air, not the surface (skin) temperature. A variable whose standard name has the form number_of_days_with_X_below|above_threshold is a count of the number of days on which the condition X_below|above_threshold is satisfied. It must have a coordinate variable or scalar coordinate variable with the standard name of X to supply the threshold(s). It must have a climatological time variable, and a cell_methods entry for within days which describes the processing of quantity X before the threshold is applied. A number_of_days is an extensive quantity in time, and the cell_methods entry for over days should be "sum". 1 - Air temperature is the bulk temperature of the air, not the surface (skin) temperature. 
A variable whose standard name has the form number_of_days_with_X_below|above_threshold is a count of the number of days on which the condition X_below|above_threshold is satisfied. It must have a coordinate variable or scalar coordinate variable with the a standard name of X to supply the threshold(s). It must have a climatological time variable, and a cell_methods entry for within days which describes the processing of quantity X before the threshold is applied. A number_of_days is an extensive quantity in time, and the cell_methods entry for over days should be "sum". + Air temperature is the bulk temperature of the air, not the surface (skin) temperature. A variable whose standard name has the form number_of_days_with_X_below|above_threshold is a count of the number of days on which the condition X_below|above_threshold is satisfied. It must have a coordinate variable or scalar coordinate variable with the standard name of X to supply the threshold(s). It must have a climatological time variable, and a cell_methods entry for within days which describes the processing of quantity X before the threshold is applied. A number_of_days is an extensive quantity in time, and the cell_methods entry for over days should be "sum". 1 - The construction lwe_thickness_of_X_amount or _content means the vertical extent of a layer of liquid water having the same mass per unit area. "Precipitation" in the earth's atmosphere means precipitation of water in all phases. The abbreviation "lwe" means liquid water equivalent. A variable whose standard name has the form number_of_days_with_X_below|above_threshold is a count of the number of days on which the condition X_below|above_threshold is satisfied. It must have a coordinate variable or scalar coordinate variable with the a standard name of X to supply the threshold(s). It must have a climatological time variable, and a cell_methods entry for within days which describes the processing of quantity X before the threshold is applied. A number_of_days is an extensive quantity in time, and the cell_methods entry for over days should be "sum". + The construction lwe_thickness_of_X_amount or _content means the vertical extent of a layer of liquid water having the same mass per unit area. "Precipitation" in the earth's atmosphere means precipitation of water in all phases. The abbreviation "lwe" means liquid water equivalent. A variable whose standard name has the form number_of_days_with_X_below|above_threshold is a count of the number of days on which the condition X_below|above_threshold is satisfied. It must have a coordinate variable or scalar coordinate variable with the standard name of X to supply the threshold(s). It must have a climatological time variable, and a cell_methods entry for within days which describes the processing of quantity X before the threshold is applied. A number_of_days is an extensive quantity in time, and the cell_methods entry for over days should be "sum". @@ -12908,7 +14747,7 @@ totals are summed to obtain the index. 1 - Wind is defined as a two-dimensional (horizontal) air velocity vector, with no vertical component. (Vertical motion in the atmosphere has the standard name upward_air_velocity.) The wind speed is the magnitude of the wind velocity. A variable whose standard name has the form number_of_days_with_X_below|above_threshold is a count of the number of days on which the condition X_below|above_threshold is satisfied. 
It must have a coordinate variable or scalar coordinate variable with the a standard name of X to supply the threshold(s). It must have a climatological time variable, and a cell_methods entry for within days which describes the processing of quantity X before the threshold is applied. A number_of_days is an extensive quantity in time, and the cell_methods entry for over days should be "sum". + Wind is defined as a two-dimensional (horizontal) air velocity vector, with no vertical component. (Vertical motion in the atmosphere has the standard name upward_air_velocity.) The wind speed is the magnitude of the wind velocity. A variable whose standard name has the form number_of_days_with_X_below|above_threshold is a count of the number of days on which the condition X_below|above_threshold is satisfied. It must have a coordinate variable or scalar coordinate variable with the standard name of X to supply the threshold(s). It must have a climatological time variable, and a cell_methods entry for within days which describes the processing of quantity X before the threshold is applied. A number_of_days is an extensive quantity in time, and the cell_methods entry for over days should be "sum". @@ -12918,6 +14757,13 @@ totals are summed to obtain the index. The number of icebergs per unit area. + + 1 + + + A variable with the standard name of number_of_missing_observations contains the number of discrete observations or measurements that were not available to derive the values of another data variable. The linkage between the data variable and the variable with a standard_name of number_of_missing_observations is achieved using the ancillary_variables attribute. + + 1 @@ -12925,6 +14771,34 @@ totals are summed to obtain the index. A variable with the standard name of number_of_observations contains the number of discrete observations or measurements from which the values of another data variable have been derived. The linkage between the data variable and the variable with a standard_name of number_of_observations is achieved using the ancillary_variables attribute. + + m-3 + + + The aerosol particle number size distribution is the number concentration of aerosol particles as a function of particle diameter. A coordinate variable with the standard name of electrical_mobility_particle_diameter, aerodynamic_particle_diameter, or optical_particle_diameter should be specified to indicate that the property applies at specific particle sizes selected by the indicated method. To specify the relative humidity at which the particle sizes were selected, provide a scalar coordinate variable with the standard name of relative_humidity_for_aerosol_particle_size_selection. "log10_X" means common logarithm (i.e. base 10) of X. "stp" means standard temperature (0 degC) and pressure (101325 Pa). + + + + m-3 + + + The aerosol particle number size distribution is the number concentration of aerosol particles as a function of particle diameter. A coordinate variable with the standard name of electrical_mobility_particle_diameter, aerodynamic_particle_diameter, or optical_particle_diameter should be specified to indicate that the property applies at specific particle sizes selected by the indicated method. To specify the relative humidity at which the particle sizes were selected, provide a scalar coordinate variable with the standard name of relative_humidity_for_aerosol_particle_size_selection. 
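The number_of_observations and number_of_missing_observations entries above rely on the CF ancillary_variables attribute to tie the count variables to the data variable they describe. As a hedged illustration (the file, dimension, and variable names are invented for the example), the linkage could be written with the netCDF4 Python package like this:

```python
from netCDF4 import Dataset

# Illustrative file, dimension, and variable names.
with Dataset("daily_mean.nc", "w") as nc:
    nc.createDimension("time", 30)

    # Data variable derived from discrete observations.
    temp = nc.createVariable("temp_mean", "f4", ("time",))
    temp.standard_name = "air_temperature"
    temp.units = "K"
    # ancillary_variables links the data variable to its counts.
    temp.ancillary_variables = "n_obs n_missing"

    n_obs = nc.createVariable("n_obs", "i4", ("time",))
    n_obs.standard_name = "number_of_observations"
    n_obs.units = "1"

    n_missing = nc.createVariable("n_missing", "i4", ("time",))
    n_missing.standard_name = "number_of_missing_observations"
    n_missing.units = "1"
```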
+ + + + m-3 + + + The cloud condensation nuclei number size distribution is the number concentration of aerosol particles as a function of particle diameter, where the particle acts as condensation nucleus for liquid-phase clouds. A coordinate variable with the standard name of relative_humidity should be specified to indicate that the property refers to a specific supersaturation with respect to liquid water. A coordinate variable with the standard name of electrical_mobility_particle_diameter should be specified to indicate that the property applies at specific mobility particle sizes. To specify the relative humidity at which the particle sizes were selected, provide a scalar coordinate variable with the standard name of relative_humidity_for_aerosol_particle_size_selection. The ability of a particle to act as a condensation nucleus is determined by its size, chemical composition, and morphology. "stp" means standard temperature (0 degC) and pressure (101325 Pa). + + + + m-3 + + + The cloud condensation nuclei number size distribution is the number concentration of aerosol particles as a function of particle diameter, where the particle acts as condensation nucleus for liquid-phase clouds. A coordinate variable with the standard name of relative_humidity should be specified to indicate that the property refers to a specific supersaturation with respect to liquid water. A coordinate variable with the standard name of electrical_mobility_particle_diameter should be specified to indicate that the property applies at specific mobility particle sizes. To specify the relative humidity at which the particle sizes were selected, provide a scalar coordinate variable with the standard name of relative_humidity_for_aerosol_particle_size_selection. The ability of a particle to act as a condensation nucleus is determined by its size, chemical composition, and morphology. + + kg s-1 @@ -12939,6 +14813,13 @@ totals are summed to obtain the index. + + 1 + + + See Appendix D of the CF convention for information about parametric vertical coordinates. + + W @@ -13006,7 +14887,7 @@ totals are summed to obtain the index. kg m-2 - "Content" indicates a quantity per unit area. "Dissolvedinorganic carbon" describes a family of chemical species in solution, including carbon dioxide, carbonic acid and the carbonate and bicarbonate anions. "Dissolved inorganic carbon" is the term used in standard names for all species belonging to the family that are represented within a given model. The list of individual species that are included in a quantity having a group chemical standard name can vary between models. Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute. + "Content" indicates a quantity per unit area. The "ocean content" of a quantity refers to the vertical integral from the surface to the bottom of the ocean. "Dissolved inorganic carbon" describes a family of chemical species in solution, including carbon dioxide, carbonic acid and the carbonate and bicarbonate anions. "Dissolved inorganic carbon" is the term used in standard names for all species belonging to the family that are represented within a given model. The list of individual species that are included in a quantity having a group chemical standard name can vary between models. Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute. 
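The preceding entry defines an "ocean content" as the vertical integral of a concentration from the sea surface to the sea floor. A minimal sketch of that reduction, assuming a per-volume concentration and layer thicknesses on a shared vertical axis (the function name is illustrative):

```python
import numpy as np

def ocean_content(concentration, layer_thickness, axis=0):
    """Vertical integral of a per-volume concentration (e.g. kg m-3)
    over layer thicknesses (m), giving content per unit area (kg m-2).

    Layers below the sea floor should be NaN (or masked) so they do
    not contribute to the sum.
    """
    return np.nansum(concentration * layer_thickness, axis=axis)
```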
@@ -13090,7 +14971,7 @@ totals are summed to obtain the index. kg s-1 - The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. Parameterized eddy advection in an ocean model means the part due to a scheme representing parameterized eddy-induced advective effects not included in the resolved model velocity field. Parameterized submesoscale eddy advection occurs on a spatial scale of the order of 1 km horizontally. Reference: James C. McWilliams 2016, Submesoscale currents in the ocean, Proceedings of the Royal Society A: Mathematical, Physical and Engineering Sciences, volume 472, issue 2189. DOI: 10.1098/rspa.2016.0117. There are also standard names for parameterized_mesoscale_eddy_advection which, along with parameterized_submesoscale_eddy_advection, contributes to the total parameterized eddy advection. + The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. Parameterized eddy advection in an ocean model means the part due to a scheme representing parameterized eddy-induced advective effects not included in the resolved model velocity field. Parameterized submesoscale eddy advection occurs on a spatial scale of the order of 1 km horizontally. Reference: James C. McWilliams 2016, Submesoscale currents in the ocean, Proceedings of the Royal Society A: Mathematical, Physical and Engineering Sciences, volume 472, issue 2189. DOI: 10.1098/rspa.2016.0117. There are also standard names for parameterized_mesoscale_eddy_advection which, along with parameterized_submesoscale_eddy_advection, contributes to the total parameterized eddy advection. @@ -13167,7 +15048,14 @@ totals are summed to obtain the index. m2 s-2 - Mongomery potential is defined as M = ap + gz, where a = specific volume, p = pressure, g = gravity, and z=depth. It represents an exact streamfunction on specificvolume anomaly surfaces. + Montgomery potential is defined as M = ap + gz, where a = specific volume, p = pressure, g = gravity, and z=depth. It represents an exact streamfunction on specific volume anomaly surfaces. + + + + m + + + The depth in the ocean, L, that buoyant production or destruction of turbulent energy balances the turbulent kinetic energy: L = -u*^3 / (k B0), where u* is the oceanic surface frictional velocity, k is the von Karman constant, and B0 is the oceanic surface buoyancy flux. If the buoyancy flux is destabilizing, L is negative. @@ -13209,35 +15097,35 @@ totals are summed to obtain the index. 1 - See Appendix D of the CF convention for information about dimensionless vertical coordinates. + See Appendix D of the CF convention for information about parametric vertical coordinates. 1 - See Appendix D of the CF convention for information about dimensionless vertical coordinates. + See Appendix D of the CF convention for information about parametric vertical coordinates. 1 - See Appendix D of the CF convention for information about dimensionless vertical coordinates. + See Appendix D of the CF convention for information about parametric vertical coordinates. 1 - See Appendix D of the CF convention for information about dimensionless vertical coordinates.
Note that the ocean sigma coordinate is not the same quantity as sea water sigma (excess of density over 1000 kg m-3), for which there are various other standard names. + See Appendix D of the CF convention for information about parametric vertical coordinates. Note that the ocean sigma coordinate is not the same quantity as sea water sigma (excess of density over 1000 kg m-3), for which there are various other standard names. 1 - See Appendix D of the CF convention for information about dimensionless vertical coordinates. + See Appendix D of the CF convention for information about parametric vertical coordinates. @@ -13314,28 +15202,28 @@ totals are summed to obtain the index. m2 s-1 - Diffusivity is also sometimes known as the coefficient of diffusion. Diffusion occurs as a result of a gradient in the spatial distribution of mass concentration, temperature or momentum. The diffusivity may be very different in the vertical and horizontal directions. The construction vertical_X_diffusivity means the vertical component of thediffusivity of X due to motion which is not resolved on the grid scale of the model. "Due to background" means caused by a time invariant imposed field which may be eitherconstant over the globe or spatially varying, depending on the ocean model used. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. + Diffusivity is also sometimes known as the coefficient of diffusion. Diffusion occurs as a result of a gradient in the spatial distribution of mass concentration, temperature or momentum. The diffusivity may be very different in the vertical and horizontal directions. The construction "vertical_X_diffusivity" means the vertical component of the diffusivity of X due to motion which is not resolved on the grid scale of the model. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Due to background" means caused by a time invariant imposed field which may be either constant over the globe or spatially varying, depending on the ocean model used. m2 s-1 - The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Vertical momentum diffusivity" means the vertical component of the diffusivity of momentum due to motion which is not resolved on the grid scale of the model. Convective mixing in the ocean is somtimes modelled as an enhanced diffusivity. + "Vertical momentum diffusivity" means the vertical component of the diffusivity of momentum due to motion which is not resolved on the grid scale of the model. The diffusivity may be very different in the vertical and horizontal directions. Diffusivity is also sometimes known as the coefficient of diffusion. Diffusion occurs as a result of a gradient in the spatial distribution of mass concentration, temperature or momentum. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. Convective mixing in the ocean is sometimes modelled as an enhanced diffusivity. 
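The ocean sigma coordinate entries above defer to Appendix D of the CF conventions, where the parametric definition for the ocean sigma coordinate is z(n,k,j,i) = eta(n,j,i) + sigma(k) * (depth(j,i) + eta(n,j,i)). A small sketch of that mapping (the array shapes and the function name are assumptions for illustration):

```python
import numpy as np

def ocean_sigma_to_z(sigma, eta, depth):
    """Height (z, positive up) from CF ocean sigma coordinate values,
    per Appendix D of the CF conventions:
        z(k, j, i) = eta(j, i) + sigma(k) * (depth(j, i) + eta(j, i))

    sigma : 1-D array, 0 at the sea surface and -1 at the sea floor
    eta   : sea surface height above the geoid (m), shape (j, i)
    depth : sea floor depth below the geoid (m, positive), shape (j, i)
    """
    # Broadcast sigma across the horizontal dimensions.
    return eta + sigma[:, np.newaxis, np.newaxis] * (depth + eta)
```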
m2 s-1 - Diffusivity is also sometimes known as the coefficient of diffusion. Diffusion occurs as a result of a gradient in the spatial distribution of mass concentration, temperature or momentum. The diffusivity may be very different in the vertical and horizontal directions. The construction vertical_X_diffusivity means the vertical component of thediffusivity of X due to motion which is not resolved on the grid scale of the model. "Due to form drag" refers to a vertical diffusivity resulting from a model scheme representing mesoscale eddy-induced form drag. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. + The construction vertical_X_diffusivity means the vertical component of the diffusivity of X due to motion which is not resolved on the grid scale of the model. The diffusivity may be very different in the vertical and horizontal directions. Diffusivity is also sometimes known as the coefficient of diffusion. Diffusion occurs as a result of a gradient in the spatial distribution of mass concentration, temperature or momentum. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Due to form drag" refers to a vertical diffusivity resulting from a model scheme representing mesoscale eddy-induced form drag. m2 s-1 - Diffusivity is also sometimes known as the coefficient of diffusion. Diffusion occurs as a result of a gradient in the spatial distribution of mass concentration, temperature or momentum. The diffusivity may be very different in the vertical and horizontal directions. The construction vertical_X_diffusivity means the vertical component of thediffusivity of X due to motion which is not resolved on the grid scale of the model. "Due to tides" means due to all astronomical gravity changes which manifest as tides.No distinction is made between different tidal components. The specification of a physicalprocess by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. + The construction vertical_X_diffusivity means the vertical component of the diffusivity of X due to motion which is not resolved on the grid scale of the model. The diffusivity may be very different in the vertical and horizontal directions. Diffusivity is also sometimes known as the coefficient of diffusion. Diffusion occurs as a result of a gradient in the spatial distribution of mass concentration, temperature or momentum. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Due to tides" means due to all astronomical gravity changes which manifest as tides. No distinction is made between different tidal components. @@ -13356,7 +15244,7 @@ totals are summed to obtain the index. m2 s-1 - Diffusivity is also sometimes known as the coefficient of diffusion. Diffusion occurs as a result of a gradient in the spatial distribution of mass concentration, temperature or momentum. The diffusivity may be very different in the vertical and horizontal directions. 
The construction vertical_X_diffusivity means the vertical component of thediffusivity of X due to motion which is not resolved on the grid scale of the model. "Due to background" means caused by a time invariant imposed field which may be eitherconstant over the globe or spatially varying, depending on the ocean model used. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. + Diffusivity is also sometimes known as the coefficient of diffusion. Diffusion occurs as a result of a gradient in the spatial distribution of mass concentration, temperature or momentum. The diffusivity may be very different in the vertical and horizontal directions. The construction "vertical_X_diffusivity" means the vertical component of the diffusivity of X due to motion which is not resolved on the grid scale of the model. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Due to background" means caused by a time invariant imposed field which may be either constant over the globe or spatially varying, depending on the ocean model used. @@ -13370,7 +15258,7 @@ totals are summed to obtain the index. m2 s-1 - Diffusivity is also sometimes known as the coefficient of diffusion. Diffusion occurs as a result of a gradient in the spatial distribution of mass concentration, temperature or momentum. The diffusivity may be very different in the vertical and horizontal directions. The construction vertical_X_diffusivity means the vertical component of thediffusivity of X due to motion which is not resolved on the grid scale of the model. "Due to tides" means due to all astronomical gravity changes which manifest as tides.No distinction is made between different tidal components. The specification of a physicalprocess by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. + The construction vertical_X_diffusivity means the vertical component of the diffusivity of X due to motion which is not resolved on the grid scale of the model. The diffusivity may be very different in the vertical and horizontal directions. Diffusivity is also sometimes known as the coefficient of diffusion. Diffusion occurs as a result of a gradient in the spatial distribution of mass concentration, temperature or momentum. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Due to tides" means due to all astronomical gravity changes which manifest as tides. No distinction is made between different tidal components. @@ -13391,7 +15279,7 @@ totals are summed to obtain the index. 1 - "X_volume_fraction" means the fraction of volume occupied by X. + "X_volume_fraction" means the fraction of volume occupied by X. It is evaluated as the volume of interest divided by the grid cell volume. It may be expressed as a fraction, a percentage, or any other dimensionless representation of a fraction. 
A data variable with standard name ocean_volume_fraction is used to store the fraction of a grid cell underlying sea-water, for example, where part of the grid cell is occupied by land or to record ocean volume on a model's native grid following a regridding operation. @@ -13412,7 +15300,7 @@ totals are summed to obtain the index. m3 s-1 - "y" indicates a vector component along the grid y-axis, positive with increasing y. + "y" indicates a vector component along the grid y-axis, positive with increasing y. @@ -13440,7 +15328,7 @@ totals are summed to obtain the index. kg s-1 - "y" indicates a vector component along the grid y-axis, positive with increasing y. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. Parameterized eddy advection in an ocean model means the part due to a scheme representing parameterized eddy-induced advective effects not included in the resolved model velocity field. Parameterized submesoscale eddy advection occurs on a spatial scale of the order of 1 km horizontally. Reference: James C. McWilliams 2016, Submesoscale currents in the ocean, Proceedings of the Royal Society A: Mathematical, Physical and Engineering Sciences, volume 472, issue 2189. DOI: 10.1098/rspa.2016.0117. There are also standard names for parameterized_mesoscale_eddy_advection which, along with parameterized_submesoscale_eddy_advection, contributes to the total parameterized eddy advection. + "y" indicates a vector component along the grid y-axis, positive with increasing y. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. Parameterized eddy advection in an ocean model means the part due to a scheme representing parameterized eddy-induced advective effects not included in the resolved model velocity field. Parameterized submesoscale eddy advection occurs on a spatial scale of the order of 1 km horizontally. Reference: James C. McWilliams 2016, Submesoscale currents in the ocean, Proceedings of the Royal Society A: Mathematical, Physical and Engineering Sciences, volume 472, issue 2189. DOI: 10.1098/rspa.2016.0117. There are also standard names for parameterized_mesoscale_eddy_advection which, along with parameterized_submesoscale_eddy_advection, contributes to the total parameterized eddy advection. @@ -13482,28 +15370,56 @@ totals are summed to obtain the index. Pa - The partial pressure of a dissolved gas in sea water is the partial pressure in air with which it would be in equilibrium. The partial pressure of a gaseous constituent of air is the pressure which it alone would exert with unchanged temperature and number of moles per unit volume. The chemical formula for carbon dioxide is CO2. + The partial pressure of a dissolved gas in sea water is the partial pressure in air with which it would be in equilibrium. The partial pressure of a gaseous constituent of air is the pressure that it would exert if all other gaseous constituents were removed, assuming the volume, the temperature, and its number of moles remain unchanged. The chemical formula for carbon dioxide is CO2. Pa - The partial pressure of a dissolved gas in sea water is the partial pressure in air with which it would be in equilibrium. 
The partial pressure of a gaseous constituent of air is the pressure which it alone would exert with unchanged temperature and number of moles per unit volume. The chemical formula for methane is CH4. + The partial pressure of a dissolved gas in sea water is the partial pressure in air with which it would be in equilibrium. The partial pressure of a gaseous constituent of air is the pressure that it would exert if all other gaseous constituents were removed, assuming the volume, the temperature, and its number of moles remain unchanged. The chemical formula for methane is CH4. - - m + + degree_C - Permafrost is soil or rock that has remained at a temperature at or below zero degrees Celsius throughout the seasonal cycle for two or more years. "Thickness" means the vertical extent of a layer. + Perceived temperature (PT) is an equivalent air temperature of the actual thermal condition. It is the air temperature of a reference condition causing the same thermal perception in a human body considering air temperature, wind speed, humidity, solar and thermal radiation as well as clothing and activity level. It is not the perceived air temperature, which derives from either wind chill or heat index and has the standard_name apparent_air_temperature. It is strongly recommended that a variable with this standard name should have a units_metadata attribute, with one of the values "on-scale" or "difference", whichever is appropriate for the data, because it is essential to know whether the temperature is on-scale (meaning relative to the origin of the scale indicated by the units) or refers to temperature differences (implying that the origin of the temperature scale is irrelevant), in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). - - degree + + m - Global average sea level change is due to change in volume of the water in the ocean, caused by mass and/or density change, or to change in the volume of the ocean basins, caused by tectonics etc. It is sometimes called "eustatic", which is a term that also has other definitions. It differs from the change in the global average sea surface height relative to the centre of the Earth by the global average vertical movement of the ocean floor. Zero sea level change is an arbitrary level. Phase is the initial angle of a wave modelled by a sinusoidal function. A coordinate variable of harmonic_period should be used to specify the period of the sinusoidal wave. Because global average sea level change quantifies the change in volume of the world ocean, it is not calculated necessarily by considering local changes in mean sea level. + The quantity with standard name permafrost_active_layer_thickness is the thickness of the layer of the ground that is subject to annual thawing and freezing in areas underlain by permafrost. "Thickness" means the vertical extent of a layer. Permafrost is soil or rock that has remained at a temperature at or below zero degrees Celsius throughout the seasonal cycle for two or more years. + + + + 1 + + + "Area fraction" is the fraction of a grid cell's horizontal area that has some characteristic of interest. It is evaluated as the area of interest divided by the grid cell area. It may be expressed as a fraction, a percentage, or any other dimensionless representation of a fraction. Permafrost is soil or rock that has remained at a temperature at or below zero degrees Celsius throughout the seasonal cycle for two or more years.
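The units_metadata recommendation in the perceived temperature definition above recurs in several temperature entries below. A minimal sketch of honouring it when writing a file with netCDF4 (the file and variable names are hypothetical, and the "temperature: on_scale" value follows the syntax of the CF conventions section cited in the definition):

    import netCDF4

    # Hypothetical existing file and variable, opened for editing.
    with netCDF4.Dataset("perceived_temperature.nc", "r+") as ds:
        pt = ds.variables["pt"]
        pt.standard_name = "perceived_temperature"
        pt.units = "degree_C"
        # On-scale values are relative to the origin of the Celsius scale;
        # use "temperature: difference" for temperature differences.
        pt.units_metadata = "temperature: on_scale"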
+ + + + m + + + Permafrost is soil or rock that has remained at a temperature at or below zero degrees Celsius throughout the seasonal cycle for two or more years. "Thickness" means the vertical extent of a layer. + + + + degree + + + Global average sea level change is due to change in volume of the water in the ocean, caused by mass and/or density change, or to change in the volume of the ocean basins, caused by tectonics etc. It is sometimes called "eustatic", which is a term that also has other definitions. It differs from the change in the global average sea surface height relative to the centre of the Earth by the global average vertical movement of the ocean floor. Zero sea level change is an arbitrary level. Phase is the initial angle of a wave modelled by a sinusoidal function. A coordinate variable of harmonic_period should be used to specify the period of the sinusoidal wave. Because global average sea level change quantifies the change in volume of the world ocean, it is not calculated necessarily by considering local changes in mean sea level. + + + + kg s-1 + + + The amount of total phosphorus mass transported in the river channels from land into the ocean. This quantity can be provided at a certain location within the river network and floodplain (over land) or at the river mouth (over ocean) where the river enters the ocean. "River" refers to water in the fluvial system (stream and floodplain). Phosphorus means phosphorus in all chemical forms, commonly referred to as "total phosphorus". @@ -13534,6 +15450,13 @@ totals are summed to obtain the index. "Photolysis" is a chemical reaction in which a chemical compound is broken down by photons. The "reaction rate" is the rate at which the reactants of a chemical reaction form the products. The chemical formula for ozone is O3. The IUPAC name for ozone is trioxygen. "1D oxygen atom" means the singlet D state, an excited state, of the oxygen atom. The combined photolysis rate of ozone to both excited and ground state oxygen atoms has the standard name photolysis_rate_of_ozone. + + degree_C + + + Physiological equivalent temperature (PET) is an equivalent air temperature of the actual thermal condition. It is the air temperature of a reference condition without wind and solar radiation at which the heat budget of the human body is balanced with the same core and skin temperature. Note that PET here is not potential evapotranspiration. It is strongly recommended that a variable with this standard name should have a units_metadata attribute, with one of the values "on-scale" or "difference", whichever is appropriate for the data, because it is essential to know whether the temperature is on-scale (meaning relative to the origin of the scale indicated by the units) or refers to temperature differences (implying that the origin of the temperature scale is irrevelant), in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). + + 1 @@ -13545,126 +15468,322 @@ totals are summed to obtain the index. degree - Standard names for "platform" describe the motion and orientation of the vehicle from which observations are made. Platforms include, but are not limited to, satellites, aeroplanes, ships, instruments and buoys. Platform azimuth angle is the horizontal angle between the line of sight from the observation point to the platform and a reference direction at the observation point, which is often due north. 
The angle is measured clockwise positive, starting from the reference direction. A comment attribute should be added to a data variable with the standard name platform_azimuth_angle to specify the reference direction. A standard name also exists for sensor_azimuth_angle. For some viewing geometries the sensor and the platform cannot be assumed to be close enough to neglect the difference in calculated azimuth angle. + Platform azimuth angle is the horizontal angle between the line of sight from the observation point to the platform and a reference direction at the observation point, which is often due north. The angle is measured clockwise positive, starting from the reference direction. A comment attribute should be added to a data variable with the standard name platform_azimuth_angle to specify the reference direction. A standard name also exists for sensor_azimuth_angle. For some viewing geometries the sensor and the platform cannot be assumed to be close enough to neglect the difference in calculated azimuth angle. A "platform" is a structure or vehicle that serves as a base for mounting sensors. Platforms include, but are not limited to, satellites, aeroplanes, ships, buoys, instruments, ground stations, and masts. degree - Standard names for "platform" describe the motion and orientation of the vehicle from which observations are made. Platforms include, but are not limited to, satellites, aeroplanes, ships, instruments and buoys. The platform course is the direction in which the platform is travelling (not necessarily the same as the direction in which it is pointing, called platform_orientation). + Course is the clockwise angle with respect to North of the nominal forward motion direction of the platform (not necessarily the same as the direction in which it is pointing, called "platform_orientation"). A "platform" is a structure or vehicle that serves as a base for mounting sensors. Platforms include, but are not limited to, satellites, aeroplanes, ships, buoys, instruments, ground stations, and masts. m - Standard names for "platform" describe the motion and orientation of the vehicle from which observations are made. Platforms include, but are not limited to, satellites, aeroplanes, ships, instruments and buoys. "Heave" means the vertical displacement of a platform (positive upwards) over a measurement time interval. + Heave is a displacement along the local vertical axis. Heave is relative to the "at rest" position of the platform with respect to the axis of displacement. The "at rest" position of the platform may change over time. The standard name platform_heave should be chosen only if the sign convention of the data is unknown. For cases where the sign convention of the heave is known, a standard name of platform_heave_down or platform_heave_up should be chosen, as appropriate. A "platform" is a structure or vehicle that serves as a base for mounting sensors. Platforms include, but are not limited to, satellites, aeroplanes, ships, buoys, instruments, ground stations, and masts. + + + + m + + + Heave is a displacement along the local vertical axis. Heave is relative to the "at rest" position of the platform with respect to the axis of displacement. The "at rest" position of the platform may change over time. "Down" indicates that positive values of heave represent the platform moving down as viewed by an observer on top of the platform facing forward. The standard name platform_heave_up should be used for data having the opposite sign convention. 
The standard name platform_heave should be chosen only if the sign convention of the data is unknown. A "platform" is a structure or vehicle that serves as a base for mounting sensors. Platforms include, but are not limited to, satellites, aeroplanes, ships, buoys, instruments, ground stations, and masts. m s-1 - Standard names for "platform" describe the motion and orientation of the vehicle from which observations are made. Platforms include, but are not limited to, satellites, aeroplanes, ships, instruments and buoys. "Heave" means the vertical displacement of a platform (positive upwards) over a measurement time interval. "Heave rate" means the rate of change of vertical displacement of the platform over a measurement time interval. + "Heave rate" is the rate of displacement along the local vertical axis. Heave rate might not include changes to the "at rest" position of the platform with respect to the axis of displacement, which may change over time. The standard name platform_heave_rate should be chosen only if the sign convention of the data is unknown. For cases where the sign convention of the heave rate is known, a standard name of platform_heave_rate_down or platform_heave_rate_up should be chosen, as appropriate. A "platform" is a structure or vehicle that serves as a base for mounting sensors. Platforms include, but are not limited to, satellites, aeroplanes, ships, buoys, instruments, ground stations, and masts. + + + + m s-1 + + + "Heave rate" is the rate of displacement along the local vertical axis. Heave rate might not include changes to the "at rest" position of the platform with respect to the axis of displacement, which may change over time. "Down" indicates that positive values of heave rate represent the platform moving down as viewed by an observer on top of the platform facing forward. The standard name platform_heave_rate_up should be used for data having the opposite sign convention. The standard name platform_heave_rate should be chosen only if the sign convention of the data is unknown. A "platform" is a structure or vehicle that serves as a base for mounting sensors. Platforms include, but are not limited to, satellites, aeroplanes, ships, buoys, instruments, ground stations, and masts. + + + + m s-1 + + + "Heave rate" is the rate of displacement along the local vertical axis. Heave rate might not include changes to the "at rest" position of the platform with respect to the axis of displacement, which may change over time. "Up" indicates that positive values of heave rate represent the platform moving up as viewed by an observer on top of the platform facing forward. The standard name platform_heave_rate_down should be used for data having the opposite sign convention. The standard name platform_heave_rate should be chosen only if the sign convention of the data is unknown. A "platform" is a structure or vehicle that serves as a base for mounting sensors. Platforms include, but are not limited to, satellites, aeroplanes, ships, buoys, instruments, ground stations, and masts. + + + + m + + + Heave is a displacement along the local vertical axis. Heave is relative to the "at rest" position of the platform with respect to the axis of displacement. The "at rest" position of the platform may change over time. "Up" indicates that positive values of heave represent the platform moving up as viewed by an observer on top of the platform facing forward. The standard name platform_heave_down should be used for data having the opposite sign convention. 
The standard name platform_heave should be chosen only if the sign convention of the data is unknown. A "platform" is a structure or vehicle that serves as a base for mounting sensors. Platforms include, but are not limited to, satellites, aeroplanes, ships, buoys, instruments, ground stations, and masts. - Standard names for "platform" describe the motion and orientation of the vehicle from which observations are made. Platforms include, but are not limited to, satellites, aeroplanes, ships, instruments and buoys. A variable with the standard name of platform_id contains strings which help to identify the platform from which an observation was made. For example, this may be a WMO station identification number. + A variable with the standard name of platform_id contains strings which help to identify the platform from which an observation was made. For example, this may be a WMO station identification number. A "platform" is a structure or vehicle that serves as a base for mounting sensors. Platforms include, but are not limited to, satellites, aeroplanes, ships, buoys, instruments, ground stations, and masts. - Standard names for "platform" describe the motion and orientation of the vehicle from which observations are made. Platforms include, but are not limited to, satellites, aeroplanes, ships, instruments and buoys. A variable with the standard name of platform_name contains strings which help to identify the platform from which an observation was made. For example, this may be a geographical place name such as "South Pole" or the name of a meteorological observing station. + A variable with the standard name of platform_name contains strings which help to identify the platform from which an observation was made. For example, this may be a geographical place name such as "South Pole" or the name of a meteorological observing station. A "platform" is a structure or vehicle that serves as a base for mounting sensors. Platforms include, but are not limited to, satellites, aeroplanes, ships, buoys, instruments, ground stations, and masts. degree - Standard names for platform describe the motion and orientation of the vehicle from which observations are made e.g. aeroplane, ship or satellite. The platform orientation is the direction in which the "front" or longitudinal axis of the platform is pointing (not necessarily the same as the direction in which it is travelling, called platform_course). + Orientation is the clockwise angle with respect to North of the longitudinal (front-to-back) axis of the platform, which may be different to the platform course (which has the standard name platform_course). A "platform" is a structure or vehicle that serves as a base for mounting sensors. Platforms include, but are not limited to, satellites, aeroplanes, ships, buoys, instruments, ground stations, and masts. + + + + degree + + + Pitch is a rotation about an axis that is perpendicular to both the local vertical axis and the nominal forward motion direction of the platform. Pitch is relative to the "at rest" rotation of the platform with respect to the axis of rotation. The "at rest" rotation of the platform may change over time. The standard name platform_pitch should be chosen only if the sign convention of the data is unknown. For cases where the sign convention of the pitch is known, a standard name of platform_pitch_fore_down or platform_pitch_fore_up should be chosen, as appropriate. A "platform" is a structure or vehicle that serves as a base for mounting sensors. 
Platforms include, but are not limited to, satellites, aeroplanes, ships, buoys, instruments, ground stations, and masts. + + + + degree + + + Pitch is a rotation about an axis that is perpendicular to both the local vertical axis and the nominal forward motion direction of the platform. Pitch is relative to the "at rest" rotation of the platform with respect to the axis of rotation. The "at rest" rotation of the platform may change over time. "Fore down" indicates that positive values of pitch represent the front of the platform falling as viewed by an observer on top of the platform facing forward. The standard name platform_pitch_fore_up should be used for data having the opposite sign convention. The standard name platform_pitch should be chosen only if the sign convention of the data is unknown. A "platform" is a structure or vehicle that serves as a base for mounting sensors. Platforms include, but are not limited to, satellites, aeroplanes, ships, buoys, instruments, ground stations, and masts. - + degree - Standard names for platform describe the motion and orientation of the vehicle from which observations are made e.g. aeroplane, ship or satellite. + Pitch is a rotation about an axis that is perpendicular to both the local vertical axis and the nominal forward motion direction of the platform. Pitch is relative to the "at rest" rotation of the platform with respect to the axis of rotation. The "at rest" rotation of the platform may change over time. "Fore up" indicates that positive values of pitch represent the front of the platform rising as viewed by an observer on top of the platform facing forward. The standard name platform_pitch_fore_down should be used for data having the opposite sign convention. The standard name platform_pitch should be chosen only if the sign convention of the data is unknown. A "platform" is a structure or vehicle that serves as a base for mounting sensors. Platforms include, but are not limited to, satellites, aeroplanes, ships, buoys, instruments, ground stations, and masts. degree s-1 - Standard names for platform describe the motion and orientation of the vehicle from which observations are made e.g. aeroplane, ship or satellite. + "Pitch rate" is the rate of rotation about an axis that is perpendicular to both the local vertical axis and the nominal forward motion direction of the platform. Pitch rate might not include changes to the "at rest" rotation of the platform with respect to the axis of rotation, which may change over time. The standard name platform_pitch_rate should be chosen only if the sign convention of the data is unknown. For cases where the sign convention of the pitch rate is known, a standard name of platform_pitch_rate_fore_down or platform_pitch_rate_fore_up should be chosen, as appropriate. A "platform" is a structure or vehicle that serves as a base for mounting sensors. Platforms include, but are not limited to, satellites, aeroplanes, ships, buoys, instruments, ground stations, and masts. + + + + degree s-1 + + + "Pitch rate" is the rate of rotation about an axis that is perpendicular to both the local vertical axis and the nominal forward motion direction of the platform. Pitch rate might not include changes to the "at rest" rotation of the platform with respect to the axis of rotation, which may change over time. "Fore down" indicates that positive values of pitch rate represent the front of the platform falling as viewed by an observer on top of the platform facing forward. 
The standard name platform_pitch_rate_fore_up should be used for data having the opposite sign convention. The standard name platform_pitch_rate should be chosen only if the sign convention of the data is unknown. A "platform" is a structure or vehicle that serves as a base for mounting sensors. Platforms include, but are not limited to, satellites, aeroplanes, ships, buoys, instruments, ground stations, and masts. - + + degree s-1 + + + "Pitch rate" is the rate of rotation about an axis that is perpendicular to both the local vertical axis and the nominal forward motion direction of the platform. Pitch rate might not include changes to the "at rest" rotation of the platform with respect to the axis of rotation, which may change over time. "Fore up" indicates that positive values of pitch rate represent the front of the platform rising as viewed by an observer on top of the platform facing forward. The standard name platform_pitch_rate_fore_down should be used for data having the opposite sign convention. The standard name platform_pitch_rate should be chosen only if the sign convention of the data is unknown. A "platform" is a structure or vehicle that serves as a base for mounting sensors. Platforms include, but are not limited to, satellites, aeroplanes, ships, buoys, instruments, ground stations, and masts. + + + degree - Standard names for platform describe the motion and orientation of the vehicle from which observations are made e.g. aeroplane, ship or satellite. + Roll is a rotation about an axis that is perpendicular to the local vertical axis and is coplanar with the nominal forward motion direction of the platform. Roll is relative to the "at rest" rotation of the platform with respect to the axis of rotation. The "at rest" rotation of the platform may change over time. The standard name platform_roll should be chosen only if the sign convention of the data is unknown. For cases where the sign convention of the roll is known, a standard name of platform_roll_starboard_down or platform_roll_starboard_up should be chosen, as appropriate. A "platform" is a structure or vehicle that serves as a base for mounting sensors. Platforms include, but are not limited to, satellites, aeroplanes, ships, buoys, instruments, ground stations, and masts. degree s-1 - Standard names for platform describe the motion and orientation of the vehicle from which observations are made e.g. aeroplane, ship or satellite. + "Roll rate" is the rate of rotation about an axis that is perpendicular to the local vertical axis and is coplanar with the nominal forward motion direction of the platform. Roll rate might not include changes to the "at rest" rotation of the platform with respect to the axis of rotation, which may change over time. The standard name platform_roll_rate should be chosen only if the sign convention of the data is unknown. For cases where the sign convention of the roll rate is known, a standard name of platform_roll_rate_starboard_down or platform_roll_rate_starboard_up should be chosen, as appropriate. A "platform" is a structure or vehicle that serves as a base for mounting sensors. Platforms include, but are not limited to, satellites, aeroplanes, ships, buoys, instruments, ground stations, and masts. + + + + degree s-1 + + + "Roll rate" is the rate of rotation about an axis that is perpendicular to the local vertical axis and is coplanar with the nominal forward motion direction of the platform. 
Roll rate might not include changes to the "at rest" rotation of the platform with respect to the axis of rotation, which may change over time. "Starboard down" indicates that positive values of roll rate represent the right side of the platform falling as viewed by an observer on top of the platform facing forward. The standard name platform_roll_rate_starboard_up should be used for data having the opposite sign convention. The standard name platform_roll_rate should be chosen only if the sign convention of the data is unknown. A "platform" is a structure or vehicle that serves as a base for mounting sensors. Platforms include, but are not limited to, satellites, aeroplanes, ships, buoys, instruments, ground stations, and masts. + + + + degree s-1 + + + "Roll rate" is the rate of rotation about an axis that is perpendicular to the local vertical axis and is coplanar with the nominal forward motion direction of the platform. Roll rate might not include changes to the "at rest" rotation of the platform with respect to the axis of rotation, which may change over time. "Starboard up" indicates that positive values of roll rate represent the right side of the platform rising as viewed by an observer on top of the platform facing forward. The standard name platform_roll_rate_starboard_down should be used for data having the opposite sign convention. The standard name platform_roll_rate should be chosen only if the sign convention of the data is unknown. A "platform" is a structure or vehicle that serves as a base for mounting sensors. Platforms include, but are not limited to, satellites, aeroplanes, ships, buoys, instruments, ground stations, and masts. + + + + degree + + + Roll is a rotation about an axis that is perpendicular to the local vertical axis and is coplanar with the nominal forward motion direction of the platform. Roll is relative to the "at rest" rotation of the platform with respect to the axis of rotation. The "at rest" rotation of the platform may change over time. "Starboard down" indicates that positive values of roll represent the right side of the platform falling as viewed by an observer on top of the platform facing forward. The standard name platform_roll_starboard_up should be used for data having the opposite sign convention. The standard name platform_roll should be chosen only if the sign convention of the data is unknown. A "platform" is a structure or vehicle that serves as a base for mounting sensors. Platforms include, but are not limited to, satellites, aeroplanes, ships, buoys, instruments, ground stations, and masts. + + + + degree + + + Roll is a rotation about an axis that is perpendicular to the local vertical axis and is coplanar with the nominal forward motion direction of the platform. Roll is relative to the "at rest" rotation of the platform with respect to the axis of rotation. The "at rest" rotation of the platform may change over time. "Starboard up" indicates that positive values of roll represent the right side of the platform rising as viewed by an observer on top of the platform facing forward. The standard name platform_roll_starboard_down should be used for data having the opposite sign convention. The standard name platform_roll should be chosen only if the sign convention of the data is unknown. A "platform" is a structure or vehicle that serves as a base for mounting sensors. Platforms include, but are not limited to, satellites, aeroplanes, ships, buoys, instruments, ground stations, and masts. 
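Each pair of roll and pitch names above differs only in its sign convention, so data can be moved between conventions by a negation plus a rename. A minimal sketch with Iris, assuming a hypothetical cube of roll angles recorded in the starboard-down convention:

    import numpy as np
    import iris.cube

    # Hypothetical roll angles recorded as "starboard down", in degrees.
    roll_down = iris.cube.Cube(
        np.array([1.5, -0.5, 2.0]),
        standard_name="platform_roll_starboard_down",
        units="degree",
    )

    # "Starboard up" is the exact negation of "starboard down", so flip
    # the sign and rename to keep data and metadata consistent.
    roll_up = roll_down.copy(data=-roll_down.data)
    roll_up.standard_name = "platform_roll_starboard_up"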
m s-1 - Standard names for "platform" describe the motion and orientation of the vehicle from which observations are made. Platforms include, but are not limited to, satellites, aeroplanes, ships, instruments and buoys. The phrase "wrt" means "with respect to". The abbreviation "wrt" means with respect to. Speed is the magnitude of velocity. The platform speed with respect to air is often called the "air speed" of the platform. + Speed is the magnitude of velocity. The abbreviation "wrt" means with respect to. The platform speed with respect to air is often called the "air speed" of the platform. A "platform" is a structure or vehicle that serves as a base for mounting sensors. Platforms include, but are not limited to, satellites, aeroplanes, ships, buoys, instruments, ground stations, and masts. m s-1 - Standard names for "platform" describe the motion and orientation of the vehicle from which observations are made. Platforms include, but are not limited to, satellites, aeroplanes, ships, instruments and buoys. The abbreviation "wrt" means with respect to. Speed is the magnitude of velocity. The platform speed with respect to ground is relative to the solid Earth beneath it, i.e. the sea floor for a ship. It is often called the "ground speed" of the platform. + Speed is the magnitude of velocity. The abbreviation "wrt" means with respect to. The platform speed with respect to ground is relative to the solid Earth beneath it, i.e. the sea floor for a ship. It is often called the "ground speed" of the platform. A "platform" is a structure or vehicle that serves as a base for mounting sensors. Platforms include, but are not limited to, satellites, aeroplanes, ships, buoys, instruments, ground stations, and masts. m s-1 - Standard names for "platform" describe the motion and orientation of the vehicle from which observations are made. Platforms include, but are not limited to, satellites, aeroplanes, ships, instruments and buoys. The abbreviation "wrt" means with respect to. Speed is the magnitude of velocity. + Speed is the magnitude of velocity. The abbreviation "wrt" means with respect to. A "platform" is a structure or vehicle that serves as a base for mounting sensors. Platforms include, but are not limited to, satellites, aeroplanes, ships, buoys, instruments, ground stations, and masts. + + + + m + + + Surge is a displacement along an axis that is perpendicular to the local vertical axis and is coplanar with the nominal forward motion direction of the platform. Surge is relative to the "at rest" position of the platform with respect to the axis of displacement. The "at rest" position of the platform may change over time. The standard name platform_surge should be chosen only if the sign convention of the data is unknown. For cases where the sign convention of the surge is known, a standard name of platform_surge_fore or platform_surge_aft should be chosen, as appropriate. A "platform" is a structure or vehicle that serves as a base for mounting sensors. Platforms include, but are not limited to, satellites, aeroplanes, ships, buoys, instruments, ground stations, and masts. + + + + m + + + Surge is a displacement along an axis that is perpendicular to the local vertical axis and is coplanar with the nominal forward motion direction of the platform. Surge is relative to the "at rest" position of the platform with respect to the axis of displacement. The "at rest" position of the platform may change over time. 
"Aft" indicates that positive values of surge represent the platform moving backward as viewed by an observer on top of the platform facing forward. The standard name platform_surge_fore should be used for data having the opposite sign convention. The standard name platform_surge should be chosen only if the sign convention of the data is unknown. A "platform" is a structure or vehicle that serves as a base for mounting sensors. Platforms include, but are not limited to, satellites, aeroplanes, ships, buoys, instruments, ground stations, and masts. + + + + m + + + Surge is a displacement along an axis that is perpendicular to the local vertical axis and is coplanar with the nominal forward motion direction of the platform. Surge is relative to the "at rest" position of the platform with respect to the axis of displacement. The "at rest" position of the platform may change over time. "Fore" indicates that positive values of surge represent the platform moving forward as viewed by an observer on top of the platform facing forward. The standard name platform_surge_aft should be used for data having the opposite sign convention. The standard name platform_surge should be chosen only if the sign convention of the data is unknown. A "platform" is a structure or vehicle that serves as a base for mounting sensors. Platforms include, but are not limited to, satellites, aeroplanes, ships, buoys, instruments, ground stations, and masts. + + + + m s-1 + + + "Surge rate" is the rate of displacement along an axis that is perpendicular to the local vertical axis and is coplanar with the nominal forward motion direction of the platform. Surge rate might not include changes to the "at rest" position of the platform with respect to the axis of displacement, which may change over time. The standard name platform_surge_rate should be chosen only if the sign convention of the data is unknown. For cases where the sign convention of the surge rate is known, a standard name of platform_surge_rate_fore or platform_surge_rate_aft should be chosen, as appropriate. A "platform" is a structure or vehicle that serves as a base for mounting sensors. Platforms include, but are not limited to, satellites, aeroplanes, ships, buoys, instruments, ground stations, and masts. + + + + m s-1 + + + "Surge rate" is the rate of displacement along an axis that is perpendicular to the local vertical axis and is coplanar with the nominal forward motion direction of the platform. Surge rate might not include changes to the "at rest" position of the platform with respect to the axis of displacement, which may change over time. "Aft" indicates that positive values of surge rate represent the platform moving backward as viewed by an observer on top of the platform facing forward. The standard name platform_surge_rate_fore should be used for data having the opposite sign convention. The standard name platform_surge_rate should be chosen only if the sign convention of the data is unknown. A "platform" is a structure or vehicle that serves as a base for mounting sensors. Platforms include, but are not limited to, satellites, aeroplanes, ships, buoys, instruments, ground stations, and masts. + + + + m s-1 + + + "Surge rate" is the rate of displacement along an axis that is perpendicular to the local vertical axis and is coplanar with the nominal forward motion direction of the platform. Surge rate might not include changes to the "at rest" position of the platform with respect to the axis of displacement, which may change over time. 
"Fore" indicates that positive values of surge rate represent the platform moving forward as viewed by an observer on top of the platform facing forward. The standard name platform_surge_rate_aft should be used for data having the opposite sign convention. The standard name platform_surge_rate should be chosen only if the sign convention of the data is unknown. A "platform" is a structure or vehicle that serves as a base for mounting sensors. Platforms include, but are not limited to, satellites, aeroplanes, ships, buoys, instruments, ground stations, and masts. + + + + m + + + Sway is a displacement along an axis that is perpendicular to both the local vertical axis and the nominal forward motion direction of the platform. Sway is relative to the "at rest" position of the platform with respect to the axis of displacement. The "at rest" position of the platform may change over time. The standard name platform_sway should be chosen only if the sign convention of the data is unknown. For cases where the sign convention of the sway is known, a standard name of platform_sway_starboard or platform_sway_port should be chosen, as appropriate. A "platform" is a structure or vehicle that serves as a base for mounting sensors. Platforms include, but are not limited to, satellites, aeroplanes, ships, buoys, instruments, ground stations, and masts. + + + + m + + + Sway is a displacement along an axis that is perpendicular to both the local vertical axis and the nominal forward motion direction of the platform. Sway is relative to the "at rest" position of the platform with respect to the axis of displacement. The "at rest" position of the platform may change over time. "Port" indicates that positive values of sway represent the platform moving left as viewed by an observer on top of the platform facing forward. The standard name platform_sway_starboard should be used for data having the opposite sign convention. The standard name platform_sway should be chosen only if the sign convention of the data is unknown. A "platform" is a structure or vehicle that serves as a base for mounting sensors. Platforms include, but are not limited to, satellites, aeroplanes, ships, buoys, instruments, ground stations, and masts. + + + + m s-1 + + + "Sway rate" is the rate of displacement along an axis that is perpendicular to both the local vertical axis and the nominal forward motion direction of the platform. Sway rate might not include changes to the "at rest" position of the platform with respect to the axis of displacement, which may change over time. The standard name platform_sway_rate should be chosen only if the sign convention of the data is unknown. For cases where the sign convention of the sway rate is known, a standard name of platform_sway_rate_starboard or platform_sway_rate_port should be chosen, as appropriate. A "platform" is a structure or vehicle that serves as a base for mounting sensors. Platforms include, but are not limited to, satellites, aeroplanes, ships, buoys, instruments, ground stations, and masts. + + + + m s-1 + + + "Sway rate" is the rate of displacement along an axis that is perpendicular to both the local vertical axis and the nominal forward motion direction of the platform. Sway rate might not include changes to the "at rest" position of the platform with respect to the axis of displacement, which may change over time. "Port" indicates that positive values of sway rate represent the platform moving left as viewed by an observer on top of the platform facing forward. 
The standard name platform_sway_rate_starboard should be used for data having the opposite sign convention. The standard name platform_sway_rate should be chosen only if the sign convention of the data is unknown. A "platform" is a structure or vehicle that serves as a base for mounting sensors. Platforms include, but are not limited to, satellites, aeroplanes, ships, buoys, instruments, ground stations, and masts. + + + + m s-1 + + + "Sway rate" is the rate of displacement along an axis that is perpendicular to both the local vertical axis and the nominal forward motion direction of the platform. Sway rate might not include changes to the "at rest" position of the platform with respect to the axis of displacement, which may change over time. "Starboard" indicates that positive values of sway rate represent the platform moving right as viewed by an observer on top of the platform facing forward. The standard name platform_sway_rate_port should be used for data having the opposite sign convention. The standard name platform_sway_rate should be chosen only if the sign convention of the data is unknown. A "platform" is a structure or vehicle that serves as a base for mounting sensors. Platforms include, but are not limited to, satellites, aeroplanes, ships, buoys, instruments, ground stations, and masts. + + + + m + + + Sway is a displacement along an axis that is perpendicular to both the local vertical axis and the nominal forward motion direction of the platform. Sway is relative to the "at rest" position of the platform with respect to the axis of displacement. The "at rest" position of the platform may change over time. "Starboard" indicates that positive values of sway represent the platform moving right as viewed by an observer on top of the platform facing forward. The standard name platform_sway_port should be used for data having the opposite sign convention. The standard name platform_sway should be chosen only if the sign convention of the data is unknown. A "platform" is a structure or vehicle that serves as a base for mounting sensors. Platforms include, but are not limited to, satellites, aeroplanes, ships, buoys, instruments, ground stations, and masts. degree - Standard names for "platform" describe the motion and orientation of the vehicle from which observations are made. Platforms include, but are not limited to, satellites, aeroplanes, ships, instruments and buoys. Platform view angle is the angle between the line of sight from the platform and the direction straight vertically down. Zero view angle means looking directly beneath the platform. There is no standardized sign convention for platform_view_angle. A standard name also exists for sensor_view_angle. For some viewing geometries the sensor and the platform cannot be assumed to be close enough to neglect the difference in calculated view angle. + Platform view angle is the angle between the line of sight from the platform and the direction straight vertically down. Zero view angle means looking directly beneath the platform. There is no standardized sign convention for platform_view_angle. A standard name also exists for sensor_view_angle. For some viewing geometries the sensor and the platform cannot be assumed to be close enough to neglect the difference in calculated view angle. A "platform" is a structure or vehicle that serves as a base for mounting sensors. Platforms include, but are not limited to, satellites, aeroplanes, ships, buoys, instruments, ground stations, and masts. 
+ + + + degree + + + Yaw is a rotation about the local vertical axis. Yaw is relative to the "at rest" rotation of the platform with respect to the axis of rotation. The "at rest" rotation of the platform may change over time. The standard name platform_yaw should be chosen only if the sign convention of the data is unknown. For cases where the sign convention of the yaw is known, a standard name of platform_yaw_fore_starboard or platform_yaw_fore_port should be chosen, as appropriate. A "platform" is a structure or vehicle that serves as a base for mounting sensors. Platforms include, but are not limited to, satellites, aeroplanes, ships, buoys, instruments, ground stations, and masts. + + + + degree + + + Yaw is a rotation about the local vertical axis. Yaw is relative to the "at rest" rotation of the platform with respect to the axis of rotation. The "at rest" rotation of the platform may change over time. "Fore port" indicates that positive values of yaw represent the front of the platform moving to the left as viewed by an observer on top of the platform facing forward. The standard name platform_yaw_fore_starboard should be used for data having the opposite sign convention. The standard name platform_yaw should be chosen only if the sign convention of the data is unknown. A "platform" is a structure or vehicle that serves as a base for mounting sensors. Platforms include, but are not limited to, satellites, aeroplanes, ships, buoys, instruments, ground stations, and masts. - + degree - Standard names for platform describe the motion and orientation of the vehicle from which observations are made e.g. aeroplane, ship or satellite. + Yaw is a rotation about the local vertical axis. Yaw is relative to the "at rest" rotation of the platform with respect to the axis of rotation. The "at rest" rotation of the platform may change over time. "Fore starboard" indicates that positive values of yaw represent the front of the platform moving to the right as viewed by an observer on top of the platform facing forward. The standard name platform_yaw_fore_port should be used for data having the opposite sign convention. The standard name platform_yaw should be chosen only if the sign convention of the data is unknown. A "platform" is a structure or vehicle that serves as a base for mounting sensors. Platforms include, but are not limited to, satellites, aeroplanes, ships, buoys, instruments, ground stations, and masts. degree s-1 - Standard names for platform describe the motion and orientation of the vehicle from which observations are made e.g. aeroplane, ship or satellite. + "Yaw rate" is the rate of rotation about the local vertical axis. Yaw rate might not include changes to the "at rest" rotation of the platform with respect to the axis of rotation, which may change over time. The standard name platform_yaw_rate should be chosen only if the sign convention of the data is unknown. For cases where the sign convention of the yaw rate is known, a standard name of platform_yaw_rate_fore_starboard or platform_yaw_rate_fore_port should be chosen, as appropriate. A "platform" is a structure or vehicle that serves as a base for mounting sensors. Platforms include, but are not limited to, satellites, aeroplanes, ships, buoys, instruments, ground stations, and masts. + + + + degree s-1 + + + "Yaw rate" is the rate of rotation about the local vertical axis. Yaw rate might not include changes to the "at rest" rotation of the platform with respect to the axis of rotation, which may change over time. 
"Fore port" indicates that positive values of yaw rate represent the front of the platform moving to the left as viewed by an observer on top of the platform facing forward. The standard name platform_yaw_rate_fore_starboard should be used for data having the opposite sign convention. The standard name platform_yaw_rate should be chosen only if the sign convention of the data is unknown. A "platform" is a structure or vehicle that serves as a base for mounting sensors. Platforms include, but are not limited to, satellites, aeroplanes, ships, buoys, instruments, ground stations, and masts. + + + + degree s-1 + + + "Yaw rate" is the rate of rotation about the local vertical axis. Yaw rate might not include changes to the "at rest" rotation of the platform with respect to the axis of rotation, which may change over time. "Fore starboard" indicates that positive values of yaw rate represent the front of the platform moving to the right as viewed by an observer on top of the platform facing forward. The standard name platform_yaw_rate_fore_port should be used for data having the opposite sign convention. The standard name platform_yaw_rate should be chosen only if the sign convention of the data is unknown. A "platform" is a structure or vehicle that serves as a base for mounting sensors. Platforms include, but are not limited to, satellites, aeroplanes, ships, buoys, instruments, ground stations, and masts. degree - Standard names for "platform" describe the motion and orientation of the vehicle from which observations are made. Platforms include, but are not limited to, satellites, aeroplanes, ships, instruments and buoys. Platform zenith angle is the the angle between the line of sight to the platform and the local zenith at the observation target. This angle is measured starting from directly overhead and its range is from zero (directly overhead the observation target) to 180 degrees (directly below the observation target). Local zenith is a line perpendicular to the Earth's surface at a given location. "Observation target" means a location on the Earth defined by the sensor performing the observations. A standard name also exists for sensor_zenith_angle. For some viewing geometries the sensor and the platform cannot be assumed to be close enough to neglect the difference in calculated zenith angle. + Platform zenith angle is the the angle between the line of sight to the platform and the local zenith at the observation target. This angle is measured starting from directly overhead and its range is from zero (directly overhead the observation target) to 180 degrees (directly below the observation target). Local zenith is a line perpendicular to the Earth's surface at a given location. "Observation target" means a location on the Earth defined by the sensor performing the observations. A standard name also exists for sensor_zenith_angle. For some viewing geometries the sensor and the platform cannot be assumed to be close enough to neglect the difference in calculated zenith angle. A "platform" is a structure or vehicle that serves as a base for mounting sensors. Platforms include, but are not limited to, satellites, aeroplanes, ships, buoys, instruments, ground stations, and masts. @@ -13730,6 +15849,13 @@ totals are summed to obtain the index. "Precipitation" in the earth's atmosphere means precipitation of water in all phases. In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. 
Unless indicated in the cell_methods attribute, a quantity is assumed to apply to the whole area of each horizontal grid box. Previously, the qualifier where_type was used to specify that the quantity applies only to the part of the grid box of the named type. Names containing the where_type qualifier are deprecated and newly created data should use the cell_methods attribute to indicate the horizontal area to which the quantity applies. "Canopy" means the vegetative covering over a surface. The canopy is often considered to be the outer surfaces of the vegetation. Plant height and the distribution, orientation and shape of plant leaves within a canopy influence the atmospheric environment and many plant processes within the canopy. Reference: AMS Glossary http://glossary.ametsoc.org/wiki/Canopy. + + + + + A variable with the standard name predominant_precipitation_type_at_surface contains strings which indicate the character of the predominant precipitating hydrometeor at a location or grid cell. These strings have not yet been standardised. Alternatively, the data variable may contain integers which can be translated to strings using flag_values and flag_meanings attributes. "Precipitation" in the earth's atmosphere means precipitation of water in all phases. The surface called "surface" means the lower boundary of the atmosphere. + + Pa @@ -13737,18 +15863,18 @@ totals are summed to obtain the index. The "effective cloud top defined by infrared radiation" is (approximately) the geometric height above the surface that is one optical depth at infrared wavelengths (in the region of 11 micrometers) below the cloud top that would be detected by visible and lidar techniques. Reference: Minnis, P. et al 2011 CERES Edition-2 Cloud Property Retrievals Using TRMM VIRS and Terra and Aqua MODIS Data - Part I: Algorithms IEEE Transactions on Geoscience and Remote Sensing, 49(11), 4374-4400. doi: http://dx.doi.org/10.1109/TGRS.2011.2144601. - - K Pa s-1 + + 1 - "product_of_X_and_Y" means X*Y. Air temperature is the bulk temperature of the air, not the surface (skin) temperature. "omegaX" is used for brevity to mean "lagrangian_tendency_of_air_pressure in standard names constructed as a combination of omega with some other quantity. + The construction "probability_distribution_of_X_over_Z" means that the data variable is a number in the range 0.0-1.0 for each range of X, where X varies over Z. The data variable should have an axis for X. Wind is defined as a two-dimensional (horizontal) air velocity vector, with no vertical component. (Vertical motion in the atmosphere has the standard name "upward_air_velocity"). The phrase "from_direction" is used in the construction X_from_direction and indicates the direction from which the velocity vector of X is coming. The direction is a bearing in the usual geographical sense, measured positive clockwise from due north. In meteorological reports, the direction of the wind vector is usually (but not always) given as the direction from which it is blowing ("wind_from_direction") (westerly, northerly, etc.). In other contexts, such as atmospheric modelling, it is often natural to give the direction in the usual manner of vectors as the heading or the direction to which it is blowing ("wind_to_direction") (eastward, southward, etc.). K - "product_of_X_and_Y" means X*Y. "specific" means per unit mass. Air temperature is the bulk temperature of the air, not the surface (skin) temperature. Specific humidity is the mass fraction of water vapor in (moist) air.
+ The phrase "product_of_X_and_Y" means X*Y. "specific" means per unit mass. Air temperature is the bulk temperature of the air, not the surface (skin) temperature. Specific humidity is the mass fraction of water vapor in (moist) air. It is strongly recommended that a variable with this standard name should have a units_metadata attribute, with one of the values "on-scale" or "difference", whichever is appropriate for the data, because it is essential to know whether the temperature is on-scale (meaning relative to the origin of the scale indicated by the units) or refers to temperature differences (implying that the origin of the temperature scale is irrevelant), in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). @@ -13762,14 +15888,14 @@ totals are summed to obtain the index. K m s-1 - "product_of_X_and_Y" means X*Y. A velocity is a vector quantity. "Eastward" indicates a vector component which is positive when directed eastward (negative westward). + The phrase "product_of_X_and_Y" means X*Y. A velocity is a vector quantity. "Eastward" indicates a vector component which is positive when directed eastward (negative westward). It is strongly recommended that a variable with this standard name should have a units_metadata attribute, with one of the values "on-scale" or "difference", whichever is appropriate for the data, because it is essential to know whether the temperature is on-scale (meaning relative to the origin of the scale indicated by the units) or refers to temperature differences (implying that the origin of the temperature scale is irrevelant), in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). K m s-1 - "product_of_X_and_Y" means X*Y. Air temperature is the bulk temperature of the air, not the surface (skin) temperature. "Eastward" indicates a vector component which is positive when directed eastward (negative westward). Wind is defined as a two-dimensional (horizontal) air velocity vector, with no vertical component. (Vertical motion in the atmosphere has the standard name upward_air_velocity.) + The phrase "product_of_X_and_Y" means X*Y. Air temperature is the bulk temperature of the air, not the surface (skin) temperature. "Eastward" indicates a vector component which is positive when directed eastward (negative westward). Wind is defined as a two-dimensional (horizontal) air velocity vector, with no vertical component. (Vertical motion in the atmosphere has the standard name upward_air_velocity.) It is strongly recommended that a variable with this standard name should have a units_metadata attribute, with one of the values "on-scale" or "difference", whichever is appropriate for the data, because it is essential to know whether the temperature is on-scale (meaning relative to the origin of the scale indicated by the units) or refers to temperature differences (implying that the origin of the temperature scale is irrevelant), in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). @@ -13779,6 +15905,13 @@ totals are summed to obtain the index. "product_of_X_and_Y" means X*Y. Geopotential is the sum of the specific gravitational potential energy relative to the geoid and the specific centripetal potential energy. Geopotential height is the geopotential divided by the standard acceleration due to gravity. 
It is numerically similar to the altitude (or geometric height) and not to the quantity with standard name height, which is relative to the surface. "Eastward" indicates a vector component which is positive when directed eastward (negative westward). Wind is defined as a two-dimensional (horizontal) air velocity vector, with no vertical component. (Vertical motion in the atmosphere has the standard name upward_air_velocity.) + + Pa m s-2 + + + The phrase "product_of_X_and_Y" means X*Y. "Eastward" indicates a vector component which is positive when directed eastward (negative westward). Wind is defined as a two-dimensional (horizontal) air velocity vector, with no vertical component. (Vertical motion in the atmosphere has the standard name upward_air_velocity.) The phrase "tendency_of_X" means derivative of X with respect to time. The Lagrangian tendency of a quantity is its rate of change following the motion of the fluid, also called the "material derivative" or "convective derivative". The Lagrangian tendency of air pressure, often called "omega", plays the role of the upward component of air velocity when air pressure is being used as the vertical coordinate. If the vertical air velocity is upwards, it is negative when expressed as a tendency of air pressure; downwards is positive. Air pressure is the force per unit area which would be exerted when the moving gas molecules of which the air is composed strike a theoretical surface of any orientation. + + m2 s-2 @@ -13786,13 +15919,6 @@ totals are summed to obtain the index. "product_of_X_and_Y" means X*Y. "Eastward" indicates a vector component which is positive when directed eastward (negative westward). "Northward" indicates a vector component which is positive when directed northward (negative southward). Wind is defined as a two-dimensional (horizontal) air velocity vector, with no vertical component. (Vertical motion in the atmosphere has the standard name upward_air_velocity.) - - Pa m s-2 - - - "product_of_X_and_Y" means X*Y. "Eastward" indicates a vector component which is positive when directed eastward (negative westward). Wind is defined as a two-dimensional (horizontal) air velocity vector, with no vertical component. (Vertical motion in the atmosphere has the standard name upward_air_velocity.) "omegaX" is used for brevity to mean "lagrangian_tendency_of_air_pressure in standard names constructed as a combination of omega with some other quantity. - - m s-1 @@ -13807,11 +15933,25 @@ totals are summed to obtain the index. "product_of_X_and_Y" means X*Y. A velocity is a vector quantity. "Eastward" indicates a vector component which is positive when directed eastward (negative westward). "Upward" indicates a vector component which is positive when directed upward (negative downward). Wind is defined as a two-dimensional (horizontal) air velocity vector, with no vertical component. (Vertical motion in the atmosphere has the standard name upward_air_velocity.) Upward air velocity is the vertical component of the 3D air velocity vector. - + + K Pa s-1 + + mpwapta + The phrase "product_of_X_and_Y" means X*Y. The phrase "tendency_of_X" means derivative of X with respect to time. The Lagrangian tendency of a quantity is its rate of change following the motion of the fluid, also called the "material derivative" or "convective derivative". The Lagrangian tendency of air pressure, often called "omega", plays the role of the upward component of air velocity when air pressure is being used as the vertical coordinate. 
If the vertical air velocity is upwards, it is negative when expressed as a tendency of air pressure; downwards is positive. Air pressure is the force per unit area which would be exerted when the moving gas molecules of which the air is composed strike a theoretical surface of any orientation. Air temperature is the bulk temperature of the air, not the surface (skin) temperature. It is strongly recommended that a variable with this standard name should have a units_metadata attribute, with one of the values "on-scale" or "difference", whichever is appropriate for the data, because it is essential to know whether the temperature is on-scale (meaning relative to the origin of the scale indicated by the units) or refers to temperature differences (implying that the origin of the temperature scale is irrelevant), in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). + + + Pa m s-1 - "product_of_X_and_Y" means X*Y. Geopotential is the sum of the specific gravitational potential energy relative to the geoid and the specific centripetal potential energy. Geopotential height is the geopotential divided by the standard acceleration due to gravity. It is numerically similar to the altitude (or geometric height) and not to the quantity with standard name height, which is relative to the surface. "omegaX" is used for brevity to mean "lagrangian_tendency_of_air_pressure in standard names constructed as a combination of omega with some other quantity. + The phrase "product_of_X_and_Y" means X*Y. The phrase "tendency_of_X" means derivative of X with respect to time. The Lagrangian tendency of a quantity is its rate of change following the motion of the fluid, also called the "material derivative" or "convective derivative". The Lagrangian tendency of air pressure, often called "omega", plays the role of the upward component of air velocity when air pressure is being used as the vertical coordinate. If the vertical air velocity is upwards, it is negative when expressed as a tendency of air pressure; downwards is positive. Air pressure is the force per unit area which would be exerted when the moving gas molecules of which the air is composed strike a theoretical surface of any orientation. Geopotential is the sum of the specific gravitational potential energy relative to the geoid and the specific centripetal potential energy. Geopotential height is the geopotential divided by the standard acceleration due to gravity. It is numerically similar to the altitude (or geometric height) and not to the quantity with standard name height, which is relative to the surface. + + + + Pa s-1 + + mpwhusa + The phrase "product_of_X_and_Y" means X*Y. The phrase "tendency_of_X" means derivative of X with respect to time. The Lagrangian tendency of a quantity is its rate of change following the motion of the fluid, also called the "material derivative" or "convective derivative". The Lagrangian tendency of air pressure, often called "omega", plays the role of the upward component of air velocity when air pressure is being used as the vertical coordinate. If the vertical air velocity is upwards, it is negative when expressed as a tendency of air pressure; downwards is positive. Air pressure is the force per unit area which would be exerted when the moving gas molecules of which the air is composed strike a theoretical surface of any orientation. "Specific" means per unit mass. Specific humidity is the mass fraction of water vapor in (moist) air.
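The where_type deprecation noted above moves the "where" qualifier out of the standard name and into the cell_methods attribute. A minimal sketch of that replacement, assuming the netCDF4-python API; the file name, variable name, and data values are illustrative, not taken from the table:

    import netCDF4
    import numpy as np

    ds = netCDF4.Dataset("snow_on_sea_ice.nc", "w")
    ds.createDimension("time", 1)
    snow = ds.createVariable("snow_amount", "f4", ("time",))
    snow.standard_name = "surface_snow_amount"
    snow.units = "kg m-2"
    # The horizontal area the quantity applies to is recorded in
    # cell_methods rather than in a deprecated where_type name.
    snow.cell_methods = "area: mean where sea_ice"
    snow[:] = np.array([12.5], dtype="f4")
    ds.close()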
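Several of the temperature-related entries above now recommend a units_metadata attribute taking the value "on-scale" or "difference". A minimal sketch of attaching it, again assuming netCDF4-python; the standard name shown is an assumption inferred from the mpwapta entry above, and the choice of "temperature: on-scale" is purely illustrative:

    import netCDF4
    import numpy as np

    ds = netCDF4.Dataset("omega_temperature_product.nc", "w")
    ds.createDimension("time", 2)
    var = ds.createVariable("mpwapta", "f8", ("time",))
    # Assumed identifier for the K Pa s-1 product entry above (alias mpwapta).
    var.standard_name = (
        "product_of_lagrangian_tendency_of_air_pressure_and_air_temperature"
    )
    var.units = "K Pa s-1"
    # Record whether the temperature factor is on-scale or a difference,
    # as the entry recommends.
    var.units_metadata = "temperature: on-scale"
    var[:] = np.array([0.12, -0.08])
    ds.close()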
@@ -13825,14 +15965,14 @@ totals are summed to obtain the index. K m s-1 - "product_of_X_and_Y" means X*Y. A velocity is a vector quantity. "Northward" indicates a vector component which is positive when directed northward (negative southward). + The phrase "product_of_X_and_Y" means X*Y. A velocity is a vector quantity. "Northward" indicates a vector component which is positive when directed northward (negative southward). It is strongly recommended that a variable with this standard name should have a units_metadata attribute, with one of the values "on-scale" or "difference", whichever is appropriate for the data, because it is essential to know whether the temperature is on-scale (meaning relative to the origin of the scale indicated by the units) or refers to temperature differences (implying that the origin of the temperature scale is irrelevant), in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). K m s-1 mpvta - "product_of_X_and_Y" means X*Y. Air temperature is the bulk temperature of the air, not the surface (skin) temperature. "Northward" indicates a vector component which is positive when directed northward (negative southward). Wind is defined as a two-dimensional (horizontal) air velocity vector, with no vertical component. (Vertical motion in the atmosphere has the standard name upward_air_velocity.) + The phrase "product_of_X_and_Y" means X*Y. Air temperature is the bulk temperature of the air, not the surface (skin) temperature. "Northward" indicates a vector component which is positive when directed northward (negative southward). Wind is defined as a two-dimensional (horizontal) air velocity vector, with no vertical component. (Vertical motion in the atmosphere has the standard name upward_air_velocity.) It is strongly recommended that a variable with this standard name should have a units_metadata attribute, with one of the values "on-scale" or "difference", whichever is appropriate for the data, because it is essential to know whether the temperature is on-scale (meaning relative to the origin of the scale indicated by the units) or refers to temperature differences (implying that the origin of the temperature scale is irrelevant), in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). @@ -13842,11 +15982,11 @@ totals are summed to obtain the index. "product_of_X_and_Y" means X*Y. Geopotential is the sum of the specific gravitational potential energy relative to the geoid and the specific centripetal potential energy. Geopotential height is the geopotential divided by the standard acceleration due to gravity. It is numerically similar to the altitude (or geometric height) and not to the quantity with standard name height, which is relative to the surface. "Northward" indicates a vector component which is positive when directed northward (negative southward). Wind is defined as a two-dimensional (horizontal) air velocity vector, with no vertical component. (Vertical motion in the atmosphere has the standard name upward_air_velocity.) - + Pa m s-2 - "product_of_X_and_Y" means X*Y. "Northward" indicates a vector component which is positive when directed northward (negative southward). Wind is defined as a two-dimensional (horizontal) air velocity vector, with no vertical component. (Vertical motion in the atmosphere has the standard name upward_air_velocity.)
"omegaX" is used for brevity to mean "lagrangian_tendency_of_air_pressure in standard names constructed as a combination of omega with some other quantity. + The phrase "product_of_X_and_Y" means X*Y. "Northward" indicates a vector component which is positive when directed northward (negative southward). Wind is defined as a two-dimensional (horizontal) air velocity vector, with no vertical component. (Vertical motion in the atmosphere has the standard name upward_air_velocity.) The phrase "tendency_of_X" means derivative of X with respect to time. The Lagrangian tendency of a quantity is its rate of change following the motion of the fluid, also called the "material derivative" or "convective derivative". The Lagrangian tendency of air pressure, often called "omega", plays the role of the upward component of air velocity when air pressure is being used as the vertical coordinate. If the vertical air velocity is upwards, it is negative when expressed as a tendency of air pressure; downwards is positive. Air pressure is the force per unit area which would be exerted when the moving gas molecules of which the air is composed strike a theoretical surface of any orientation. @@ -13863,32 +16003,11 @@ totals are summed to obtain the index. "product_of_X_and_Y" means X*Y. A velocity is a vector quantity. "Northward" indicates a vector component which is positive when directed northward (negative southward). "Upward" indicates a vector component which is positive when directed upward (negative downward). Wind is defined as a two-dimensional (horizontal) air velocity vector, with no vertical component. (Vertical motion in the atmosphere has the standard name upward_air_velocity.) Upward air velocity is the vertical component of the 3D air velocity vector. - - K Pa s-1 - - mpwapta - "product_of_X_and_Y" means X*Y. Air temperature is the bulk temperature of the air, not the surface (skin) temperature. "omegaX" is used for brevity to mean "lagrangian_tendency_of_air_pressure in standard names constructed as a combination of omega with some other quantity. - - - - Pa s-1 - - mpwhusa - "product_of_X_and_Y" means X*Y. "specific" means per unit mass. Specific humidity is the mass fraction of water vapor in (moist) air. "omegaX" is used for brevity to mean "lagrangian_tendency_of_air_pressure in standard names constructed as a combination of omega with some other quantity. - - - - Pa s-1 - - - "product_of_X_and_Y" means X*Y. "specific" means per unit mass. Specific humidity is the mass fraction of water vapor in (moist) air. "omegaX" is used for brevity to mean "lagrangian_tendency_of_air_pressure in standard names constructed as a combination of omega with some other quantity. - - K m s-1 - "product_of_X_and_Y" means X*Y. Air temperature is the bulk temperature of the air, not the surface (skin) temperature. A velocity is a vector quantity. "Upward" indicates a vector component which is positive when directed upward (negative downward). Upward air velocity is the vertical component of the 3D air velocity vector. + The phrase "product_of_X_and_Y" means X*Y. Air temperature is the bulk temperature of the air, not the surface (skin) temperature. A velocity is a vector quantity. "Upward" indicates a vector component which is positive when directed upward (negative downward). Upward air velocity is the vertical component of the 3D air velocity vector. 
It is strongly recommended that a variable with this standard name should have a units_metadata attribute, with one of the values "on-scale" or "difference", whichever is appropriate for the data, because it is essential to know whether the temperature is on-scale (meaning relative to the origin of the scale indicated by the units) or refers to temperature differences (implying that the origin of the temperature scale is irrelevant), in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). @@ -13898,6 +16017,13 @@ totals are summed to obtain the index. "product_of_X_and_Y" means X*Y. "specific" means per unit mass. A velocity is a vector quantity. "Upward" indicates a vector component which is positive when directed upward (negative downward). Specific humidity is the mass fraction of water vapor in (moist) air. Upward air velocity is the vertical component of the 3D air velocity vector. + + radian + + + "x" indicates a vector component along the grid x-axis, when this is not true longitude, positive with increasing x. Angular projection coordinates are angular distances in the x- and y-directions on a plane onto which the surface of the Earth has been projected according to a map projection. The relationship between the angular projection coordinates and latitude and longitude is described by the grid_mapping. + + m @@ -13905,6 +16031,13 @@ totals are summed to obtain the index. "x" indicates a vector component along the grid x-axis, when this is not true longitude, positive with increasing x. Projection coordinates are distances in the x- and y-directions on a plane onto which the surface of the Earth has been projected according to a map projection. The relationship between the projection coordinates and latitude and longitude is described by the grid_mapping. + + radian + + + "y" indicates a vector component along the grid y-axis, when this is not true latitude, positive with increasing y. Angular projection coordinates are angular distances in the x- and y-directions on a plane onto which the surface of the Earth has been projected according to a map projection. The relationship between the angular projection coordinates and latitude and longitude is described by the grid_mapping. + + m @@ -13912,32 +16045,46 @@ totals are summed to obtain the index. "y" indicates a vector component along the grid y-axis, when this is not true latitude, positive with increasing y. Projection coordinates are distances in the x- and y-directions on a plane onto which the surface of the Earth has been projected according to a map projection. The relationship between the projection coordinates and latitude and longitude is described by the grid_mapping. - - K - 14 + + 1 + - Potential temperature is the temperature a parcel of air or sea water would have if moved adiabatically to sea level pressure. + The phrase "proportion_of_acceptable_signal_returns" means the fraction of a collection (ensemble) of returned signal transmissions that have passed a set of automatic quality control criteria. For an ADCP (acoustic doppler current profiler) the rejection criteria include low correlation, large error velocity and fish detection. The dimensionless proportion is often but not exclusively expressed as a percentage, when it is referred to as "percent good". - - K + + 1 - + A variable with the standard name of quality_flag contains an indication of assessed quality information of another data variable.
The linkage between the data variable and the variable or variables with the standard_name of quality_flag is achieved using the ancillary_variables attribute. m s-1 - A velocity is a vector quantity. Radial velocity away from instrument means the component of the velocity along the line of sight of the instrument where positive implies movement away from the instrument (i.e. outward). The "instrument" (examples are radar and lidar) is the device used to make an observation. + A velocity is a vector quantity. "Radial velocity away from instrument" means the component of the velocity along the line of sight of the instrument where positive implies movement away from the instrument (i.e. outward). The "instrument" (examples are radar and lidar) is the device used to make an observation. A standard name referring to radial velocity "toward_instrument" should be used for a data variable having the opposite sign convention. + + + + m s-1 + + + A velocity is a vector quantity. "Radial velocity toward instrument" means the component of the velocity along the line of sight of the instrument where positive implies movement toward the instrument (i.e. inward). The "instrument" (examples are radar and lidar) is the device used to make an observation. A standard name referring to radial velocity "away_from_instrument" should be used for a data variable having the opposite sign convention. m s-1 - A velocity is a vector quantity. "Radial velocity away from instrument" means the component of the velocity of the scatterers along the line of sight of the instrument where positive implies movement away from the instrument (i.e. outward). The "instrument" (examples are radar and lidar) is the device used to make the observation, and the "scatterers" are what causes the transmitted signal to be returned to the instrument (examples are aerosols, hydrometeors and refractive index irregularities), of whatever kind the instrument detects. + A velocity is a vector quantity. "Radial velocity away from instrument" means the component of the velocity along the line of sight of the instrument where positive implies movement away from the instrument (i.e. outward). The "instrument" (examples are radar and lidar) is the device used to make the observation. The "scatterers" are what causes the transmitted signal to be returned to the instrument (examples are aerosols, hydrometeors and refractive index irregularities), of whatever kind the instrument detects. A standard name referring to radial velocity "toward_instrument" should be used for a data variable having the opposite sign convention. + + + + m s-1 + + + A velocity is a vector quantity. "Radial velocity toward instrument" means the component of the velocity along the line of sight of the instrument where positive implies movement toward the instrument (i.e. inward). The "instrument" (examples are radar and lidar) is the device used to make the observation. The "scatterers" are what causes the transmitted signal to be returned to the instrument (examples are aerosols, hydrometeors and refractive index irregularities), of whatever kind the instrument detects. A standard name referring to radial velocity "away_from_instrument" should be used for a data variable having the opposite sign convention. @@ -16495,6 +18642,13 @@ totals are summed to obtain the index. "Radioactivity" means the number of radioactive decays of a material per second. "Radioactivity concentration" means radioactivity per unit volume of the medium. 
"Tc" means the element "technetium" and "99Tc" is the isotope "technetium-99" with a half-life of 7.79e+07 days. + + s + + + Time it takes for a radio wave, that was transmitted by an instrument to propagate through the air to the volume of air where it is scattered and return back to an instrument. The "instrument" (examples are radar and lidar) is the device used to make the observation. The "scatterers" are what causes the transmitted signal to be returned to the instrument (examples are aerosols, hydrometeors and refractive index irregularities in the air). A standard name referring to time taken for a radio signal to propagate from the emitting instrument to a scattering volume and back to an instrument. + + m @@ -16537,35 +18691,70 @@ totals are summed to obtain the index. - - s-1 + + 1 - The "reaction rate" is the rate at which the reactants of a chemical reaction form the products. The rate of "hydroxyl radical destruction due to reaction with nmvoc" is the nmvoc reactivity with regard to reactions with OH. It is the weighted sum of the reactivity of all individual nmvoc species with OH. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. The chemical formula for the hydroxyl radical is OH. In chemistry, a "radical" is a highly reactive, and therefore shortlived, species. "nmvoc" means non methane volatile organic compounds; "nmvoc" is the term used in standard names to describe the group of chemical species having this classification that are represented within a given model. The list of individual species that are included in a quantity having a group chemical standard name can vary between models. Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute. + Rank of the matrix representing the logarithmic scale remote sensing averaging kernels (Weber 2019; Schneider et al., 2022) of the methane mole fractions obtained by a remote sensing observation (fractional changes of methane in the retrieved atmosphere relative to the fractional changes of methane in the true atmosphere, Rodgers 2000; Keppens et al., 2015). - - K s-1 + + 1 - The quantity with standard name ratio_of_sea_water_potential_temperature_anomaly_to_relaxation_timescale is a correction term applied to modelled sea water potential temperature. The term is estimated as the deviation of model local sea water potential temperature from an observation-based climatology (e.g. World Ocean Database) weighted by a user-specified relaxation coefficient in s-1 (1/(relaxation timescale)). Potential temperature is the temperature a parcel of air or sea water would have if moved adiabatically to sea level pressure. The phrase "ratio_of_X_to_Y" means X/Y. The term "anomaly" means difference from climatology. + Rank the matrix representing the remote sensing averaging kernels (Weber 2019; Schneider et al., 2022) of the methane mole fractions obtained by a remote sensing observation (changes of methane in the retrieved atmosphere relative to the changes of methane in the true atmosphere, Rodgers 2000). - - s-1 + + 1 - The quantity with standard name ratio_of_sea_water_practical_salinity_anomaly_to_relaxation_timescale is a correction term applied to modelled sea water practical salinity. 
The term is estimated as the deviation of model local sea water practical salinity from an observation-based climatology (e.g. World Ocean Database) weighted by a user-specified relaxation coefficient in s-1 (1/(relaxation timescale)). The phrase "ratio_of_X_to_Y" means X/Y. The term "anomaly" means difference from climatology. Practical Salinity, S_P, is a determination of the salinity of sea water, based on its electrical conductance. The measured conductance, corrected for temperature and pressure, is compared to the conductance of a standard potassium chloride solution, producing a value on the Practical Salinity Scale of 1978 (PSS-78). This name should not be used to describe salinity observations made before 1978, or ones not based on conductance measurements. Conversion of Practical Salinity to other precisely defined salinity measures should use the appropriate formulas specified by TEOS-10. Other standard names for precisely defined salinity quantities are sea_water_absolute_salinity (S_A); sea_water_preformed_salinity (S_*), sea_water_reference_salinity (S_R); sea_water_cox_salinity (S_C), used for salinity observations between 1967 and 1977; and sea_water_knudsen_salinity (S_K), used for salinity observations between 1901 and 1966. Salinity quantities that do not match any of the precise definitions shoul d be given the more general standard name of sea_water_salinity. Reference: www.teos-10.org; Lewis, 1980 doi:10.1109/JOE.1980.1145448. + A quality flag that reports the result of the Rate of Change test, which checks that the first order difference of a series of values is within reasonable bounds. The linkage between the data variable and this variable is achieved using the ancillary_variables attribute. There are standard names for other specific quality tests which take the form of X_quality_flag. Quality information that does not match any of the specific quantities should be given the more general standard name of quality_flag. - - m s-2 + + s-1 - Sea surface density is the density of sea water near the surface (including the part under sea-ice, if any). "component_derivative_of_X" means derivative of X with respect to distance in the component direction, which may be northward, southward, eastward, westward, x or y. The last two indicate derivatives along the axes of the grid, whether or not they are true longitude and latitude. "ratio_of_X_to_Y" means X/Y. "Ocean rigid lid pressure" means the pressure at the surface of an ocean model assuming that it is bounded above by a rigid lid. + The "reaction rate" is the rate at which the reactants of a chemical reaction form the products. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. The rate of "hydroxyl radical destruction due to reaction with nmvoc" is the nmvoc reactivity with regard to reactions with OH. It is the weighted sum of the reactivity of all individual nmvoc species with OH. The chemical formula for the hydroxyl radical is OH. In chemistry, a "radical" is a highly reactive, and therefore short lived, species. The abbreviation "nmvoc" means non methane volatile organic compounds; "nmvoc" is the term used in standard names to describe the group of chemical species having this classification that are represented within a given model. 
The list of individual species that are included in a quantity having a group chemical standard name can vary between models. Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute. - + + 1 + + + The phrase "ratio_of_X_to_Y" means X/Y. "X_volume" means the volume occupied by X within the grid cell. Pore volume is the volume of the porosity of the ground under natural, unfrozen conditions. This is often known as "ice saturation index". + + + + K s-1 + + + The quantity with standard name ratio_of_sea_water_potential_temperature_anomaly_to_relaxation_timescale is a correction term applied to modelled sea water potential temperature. The term is estimated as the deviation of model local sea water potential temperature from an observation-based climatology (e.g. World Ocean Database) weighted by a user-specified relaxation coefficient in s-1 (1/(relaxation timescale)). Potential temperature is the temperature a parcel of air or sea water would have if moved adiabatically to sea level pressure. The phrase "ratio_of_X_to_Y" means X/Y. The term "anomaly" means difference from climatology. It is strongly recommended that a variable with this standard name should have the attribute units_metadata="temperature: difference", meaning that it refers to temperature differences and implying that the origin of the temperature scale is irrelevant, because it is essential to know whether a temperature is on-scale or a difference in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). + + + + s-1 + + + The quantity with standard name ratio_of_sea_water_practical_salinity_anomaly_to_relaxation_timescale is a correction term applied to modelled sea water practical salinity. The term is estimated as the deviation of model local sea water practical salinity from an observation-based climatology (e.g. World Ocean Database) weighted by a user-specified relaxation coefficient in s-1 (1/(relaxation timescale)). The phrase "ratio_of_X_to_Y" means X/Y. The term "anomaly" means difference from climatology. Practical Salinity, S_P, is a determination of the salinity of sea water, based on its electrical conductance. The measured conductance, corrected for temperature and pressure, is compared to the conductance of a standard potassium chloride solution, producing a value on the Practical Salinity Scale of 1978 (PSS-78). This name should not be used to describe salinity observations made before 1978, or ones not based on conductance measurements. Conversion of Practical Salinity to other precisely defined salinity measures should use the appropriate formulas specified by TEOS-10. Other standard names for precisely defined salinity quantities are sea_water_absolute_salinity (S_A); sea_water_preformed_salinity (S_*), sea_water_reference_salinity (S_R); sea_water_cox_salinity (S_C), used for salinity observations between 1967 and 1977; and sea_water_knudsen_salinity (S_K), used for salinity observations between 1901 and 1966. Salinity quantities that do not match any of the precise definitions should be given the more general standard name of sea_water_salinity. Reference: www.teos-10.org; Lewis, 1980 doi:10.1109/JOE.1980.1145448. 
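The quality_flag and Rate of Change test entries above rely on two pieces of CF machinery: the ancillary_variables attribute to link a data variable to its flag, and flag_values/flag_meanings to translate integer flags into strings. A minimal sketch, assuming the netCDF4-python API; the file and variable names, the three-value flag scheme, and the standard name of the flag variable (written here in the X_quality_flag form the entry describes) are all illustrative assumptions:

    import netCDF4
    import numpy as np

    ds = netCDF4.Dataset("quality_flag_sketch.nc", "w")
    ds.createDimension("time", 4)

    temp = ds.createVariable("sea_water_temperature", "f4", ("time",))
    temp.standard_name = "sea_water_temperature"
    temp.units = "K"
    temp.ancillary_variables = "temp_flag"  # links the data to its flag
    temp[:] = np.array([280.1, 280.2, 280.2, 299.9], dtype="f4")

    flag = ds.createVariable("temp_flag", "i1", ("time",))
    flag.standard_name = "rate_of_change_test_quality_flag"  # hypothetical
    flag.flag_values = np.array([0, 1, 2], dtype="i1")
    flag.flag_meanings = "not_evaluated pass fail"
    flag[:] = np.array([0, 1, 1, 2], dtype="i1")
    ds.close()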
+ + + + sr + + + The ratio of volume extinction coefficient to volume backwards scattering coefficient by ranging instrument in air due to ambient aerosol particles (often called "lidar ratio") is the ratio of the "volume extinction coefficient" and the "volume backwards scattering coefficient of radiative flux by ranging instrument in air due to ambient aerosol particles". The ratio is assumed to be related to the same wavelength of incident radiation. "Ambient_aerosol" means that the aerosol is measured or modelled at the ambient state of pressure, temperature and relative humidity that exists in its immediate environment. "Ambient aerosol particles" are aerosol particles that have taken up ambient water through hygroscopic growth. The extent of hygroscopic growth depends on the relative humidity and the composition of the particles. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. + + + + m s-2 + + + Sea surface density is the density of sea water near the surface (including the part under sea-ice, if any). "component_derivative_of_X" means derivative of X with respect to distance in the component direction, which may be northward, southward, eastward, westward, x or y. The last two indicate derivatives along the axes of the grid, whether or not they are true longitude and latitude. "ratio_of_X_to_Y" means X/Y. "Ocean rigid lid pressure" means the pressure at the surface of an ocean model assuming that it is bounded above by a rigid lid. + + + m s-2 @@ -16576,7 +18765,14 @@ totals are summed to obtain the index. 1 - Realization is used to label a dimension that can be thought of asa statistical sample, e.g., labelling members of a model ensemble. + Realization is used to label a dimension that can be thought of as a statistical sample, e.g., labelling members of a model ensemble. + + + + W + + + The power of a radio wave that was transmitted by an instrument, scattered by the air (which changes its properties), and received again by the instrument. The "instrument" (examples are radar and lidar) is the device used to make the observation. The "scatterers" are what causes the transmitted signal to be returned to the instrument (examples are aerosols, hydrometeors and refractive index irregularities in the air). This standard name refers to the received power of the signal at the instrument. @@ -16586,6 +18782,27 @@ totals are summed to obtain the index. For models using a dimensionless vertical coordinate, for example, sigma, hybrid sigma-pressure or eta, the values of the vertical coordinate at the model levels are calculated relative to a reference level. "Reference air pressure" is the air pressure at the model reference level. It is a model-dependent constant. + + s + + + The period of time over which a parameter has been summarised (usually by averaging) in order to provide a reference (baseline) against which data has been compared. When a coordinate, scalar coordinate, or auxiliary coordinate variable with this standard name has bounds, then the bounds specify the beginning and end of the time period over which the reference was determined. If the reference represents an instant in time, rather than a period, then bounds may be omitted. It is not the time for which the actual measurements are valid; the standard name of time should be used for that.
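The reference-period entry above asks for a coordinate whose bounds mark the start and end of the baseline period. A minimal sketch of a scalar coordinate with bounds, assuming the netCDF4-python API; the 1981-2010 baseline, the variable names, and the representative mid-point value are all illustrative:

    import netCDF4
    from datetime import datetime

    ds = netCDF4.Dataset("anomaly_sketch.nc", "w")
    ds.createDimension("bnds", 2)

    ref = ds.createVariable("reference_period", "f8")  # scalar coordinate
    ref.units = "days since 1950-01-01"
    ref.bounds = "reference_period_bnds"
    # ref.standard_name would be set to the name defined by the entry above.

    bnds = ds.createVariable("reference_period_bnds", "f8", ("bnds",))
    bnds[:] = netCDF4.date2num(
        [datetime(1981, 1, 1), datetime(2011, 1, 1)], ref.units
    )
    # A representative point within the period for the coordinate itself.
    ref.assignValue(netCDF4.date2num(datetime(1996, 1, 1), ref.units))
    ds.close()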
+ + + + mol/mol + + + This ozone field acts as a reference ozone field in a diagnostic call to the model's radiation scheme. Mole fraction is used in the construction mole_fraction_of_X_in_Y, where X is a material constituent of Y. + + + + Pa + + + A constant pressure value, typically representative of mean sea level pressure, which can be used in defining coordinates or functions of state. + + kg m-3 @@ -16597,7 +18814,7 @@ totals are summed to obtain the index. - A variable with the standard name of region contains strings which indicate geographical regions. These strings must be chosen from the standard region list. + A variable with the standard name of region contains either strings which indicate a geographical region or flags which can be translated to strings using flag_values and flag_meanings attributes. These strings are standardised. Values must be taken from the CF standard region list. @@ -16618,7 +18835,7 @@ totals are summed to obtain the index. degree - Standard names for "platform" describe the motion and orientation of the vehicle from which observations are made. Platforms include, but are not limited to, satellites, aeroplanes, ships, instruments and buoys. The quantity with standard name relative_platform_azimuth_angle is the difference between the viewing geometries from two different platforms over the same observation target. It is the difference between the values of two quantities with standard name platform_azimuth_angle. There is no standardized sign convention for relative_platform_azimuth_angle. "Observation target" means a location on the Earth defined by the sensor performing the observations. A standard name also exists for relative_sensor_azimuth_angle. For some viewing geometries the sensor and the platform cannot be assumed to be close enough to neglect the difference in calculated azimuth angle. + The quantity with standard name relative_platform_azimuth_angle is the difference between the viewing geometries from two different platforms over the same observation target. It is the difference between the values of two quantities with standard name platform_azimuth_angle. There is no standardized sign convention for relative_platform_azimuth_angle. "Observation target" means a location on the Earth defined by the sensor performing the observations. A standard name also exists for relative_sensor_azimuth_angle. For some viewing geometries the sensor and the platform cannot be assumed to be close enough to neglect the difference in calculated azimuth angle. A "platform" is a structure or vehicle that serves as a base for mounting sensors. Platforms include, but are not limited to, satellites, aeroplanes, ships, buoys, instruments, ground stations, and masts. @@ -16628,6 +18845,20 @@ totals are summed to obtain the index. relative_sensor_azimuth_angle is the difference between the viewing geometries from two different sensors over the same observation target. It is the difference between the values of two quantities with standard name sensor_azimuth_angle. There is no standardized sign convention for relative_sensor_azimuth_angle. "Observation target" means a location on the Earth defined by the sensor performing the observations. A standard name also exists for relative_platform_azimuth_angle, where "platform" refers to the vehicle from which observations are made e.g. aeroplane, ship, or satellite. For some viewing geometries the sensor and the platform cannot be assumed to be close enough to neglect the difference in calculated azimuth angle. 
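The region entry above requires string values drawn from the CF standard region list (or integer flags translated via flag_values/flag_meanings). A minimal sketch of the string form, assuming the netCDF4-python API; the file and dimension names are illustrative, while the two region values are taken from the CF standard region list:

    import netCDF4
    import numpy as np

    ds = netCDF4.Dataset("region_sketch.nc", "w")
    ds.createDimension("region", 2)
    ds.createDimension("strlen", 21)

    region = ds.createVariable("geo_region", "S1", ("region", "strlen"))
    region.standard_name = "region"
    # Values come from the CF standard region list.
    names = np.array(
        ["north_atlantic_ocean", "south_atlantic_ocean"], dtype="S21"
    )
    region[:] = netCDF4.stringtochar(names)
    ds.close()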
+ + 1 + + + Logarithmic scale averaging kernels of the methane mole fractions obtained by a remote sensing observation (Rodgers, 2000). These kernels are also called fractional averaging kernels (Keppens et al., 2015). They represent the fractional changes of methane in the retrieved atmosphere relative to the fractional changes of methane in the true atmosphere. + + + + 1 + + + Averaging kernels of the methane mole fractions obtained by a remote sensing observation (changes of methane in the retrieved atmosphere relative to the changes of methane in the true atmosphere, Rodgers 2000). + + 1 @@ -16635,6 +18866,20 @@ totals are summed to obtain the index. Richardson number is a measure of dynamic stability and can be used to diagnose the existence of turbulent flow. It is defined as the ratio of the buoyant suppression of turbulence (i.e. how statically stable or unstable the conditions are) to the kinetic energy available to generate turbulence in a shear flow. + + 1 + + + Right singular vectors of the matrix representing the logarithmic scale remote sensing averaging kernels (Weber 2019; Schneider et al., 2022) of the methane mole fractions obtained by a remote sensing observation (changes of methane in the retrieved atmosphere relative to the changes of methane in the true atmosphere, Rodgers 2000; Keppens et al., 2015). + + + + 1 + + + Right singular vectors of the matrix representing the remote sensing averaging kernels (Weber 2019; Schneider et al., 2022) of the methane mole fractions obtained by a remote sensing observation (changes of methane in the retrieved atmosphere relative to the changes of methane in the true atmosphere, Rodgers 2000). + + m @@ -16702,14 +18947,14 @@ totals are summed to obtain the index. 1 - A variable with the standard name of scene_type_of_dvorak_tropical_cyclone_cloud_region contains integers which can be translated to strings using flag_values and flag_meanings attributes. It indicates the Advanced Dvorak Technique tropical cyclone cloud region scene type chosen from the following list: uniform_central_dense_overcast; embedded_center; irregular_central_dense_overcast; curved_band; shear. Alternatively, the data variable may contain strings chosen from the same standardised list to indicate the scene type. Reference: Olander, T. L., & Velden, C. S., The Advanced Dvorak Technique: Continued Development of an Objective Scheme to Estimate Tropical Cyclone Intensity Using Geostationary Infrared Satellite Imagery (2007). American Meterorological Society Weather and Forecasting, 22, 287-298. + A variable with the standard name of scene_type_of_dvorak_tropical_cyclone_cloud_region contains integers which can be translated to strings using flag_values and flag_meanings attributes. It indicates the Advanced Dvorak Technique tropical cyclone cloud region scene type chosen from the following list: uniform_central_dense_overcast; embedded_center; irregular_central_dense_overcast; curved_band; shear. Alternatively, the data variable may contain strings chosen from the same standardised list to indicate the scene type. Reference: Olander, T. L., & Velden, C. S., The Advanced Dvorak Technique: Continued Development of an Objective Scheme to Estimate Tropical Cyclone Intensity Using Geostationary Infrared Satellite Imagery (2007). American Meteorological Society Weather and Forecasting, 22, 287-298.
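The rank and right-singular-vector entries above are both derived from the same averaging-kernel matrix. A short numpy sketch of how the two quantities relate to each other, using a toy matrix rather than real retrieval output:

    import numpy as np

    # Toy 3-level averaging-kernel matrix: rows are retrieved levels,
    # columns are true levels (not real retrieval output).
    A = np.array(
        [
            [0.7, 0.2, 0.0],
            [0.2, 0.5, 0.2],
            [0.0, 0.2, 0.3],
        ]
    )

    rank = np.linalg.matrix_rank(A)  # the "rank_of_..." quantities
    u, s, vt = np.linalg.svd(A)
    right_singular_vectors = vt.T  # one column per right singular vector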
1 - A variable with the standard name of scene_type_of_dvorak_tropical_cyclone_eye_region contains integers which can be translated to strings using flag_values and flag_meanings attributes. It indicates the Advanced Dvorak Technique tropical cyclone eye region scene type chosen from the following list: clear_ragged_or_obscured_eye; pinhole_eye; large_eye; no_eye. Alternatively, the data variable may contain strings chosen from the same standardised list to indicate the scene type. Reference: Olander, T. L., & Velden, C. S., The Advanced Dvorak Technique: Continued Development of an Objective Scheme to Estimate Tropical Cyclone Intensity Using Geostationary Infrared Satellite Imagery (2007). American Meterorological Society Weather and Forecasting, 22, 287-298. + A variable with the standard name of scene_type_of_dvorak_tropical_cyclone_eye_region contains integers which can be translated to strings using flag_values and flag_meanings attributes. It indicates the Advanced Dvorak Technique tropical cyclone eye region scene type chosen from the following list: clear_ragged_or_obscured_eye; pinhole_eye; large_eye; no_eye. Alternatively, the data variable may contain strings chosen from the same standardised list to indicate the scene type. Reference: Olander, T. L., & Velden, C. S., The Advanced Dvorak Technique: Continued Development of an Objective Scheme to Estimate Tropical Cyclone Intensity Using Geostationary Infrared Satellite Imagery (2007). American Meteorological Society Weather and Forecasting, 22, 287-298. @@ -16723,7 +18968,7 @@ totals are summed to obtain the index. 1 - "X_area_fraction" means the fraction of horizontal area occupied by X. "X_area" means the horizontal area occupied by X within the grid cell. + "Area fraction" is the fraction of a grid cell's horizontal area that has some characteristic of interest. It is evaluated as the area of interest divided by the grid cell area. It may be expressed as a fraction, a percentage, or any other dimensionless representation of a fraction. @@ -16768,6 +19013,20 @@ totals are summed to obtain the index. The sea_floor_depth_below_sea_surface is the vertical distance between the sea surface and the seabed as measured at a given point in space including the variance caused by tides and possibly waves. + + s + + + "Sea floor sediment" is sediment deposited at the sea bed. "Sediment age" means the length of time elapsed since the sediment was deposited. The phrase "before_1950" is a transparent representation of the phrase "before_present", often used in the geological and archaeological domains to refer to time elapsed between an event and 1950 AD. + + + + m + + + The average size of grains (also known as particles) in a sediment sample. + + 1 @@ -16786,7 +19045,7 @@ totals are summed to obtain the index. kg m-2 - "Amount" means mass per unit area. Surface amount refers to the amount on the ground, excluding that on the plant or vegetation canopy. The phrase "surface_snow" means snow lying on the surface. "Sea ice" means all ice floating in the sea which has formed from freezing sea water, rather than by other processes such as calving of land ice to form icebergs. + "Amount" means mass per unit area. Surface snow amount refers to the amount on the solid ground or on surface ice cover, but excludes, for example, falling snowflakes and snow on plants. "Sea ice" means all ice floating in the sea which has formed from freezing sea water, rather than by other processes such as calving of land ice to form icebergs. 
@@ -16800,7 +19059,7 @@ totals are summed to obtain the index. 1 91 sic - "X_area_fraction" means the fraction of horizontal area occupied by X. "X_area" means the horizontal area occupied by X within the grid cell. Sea ice area fraction is area of the sea surface occupied by sea ice. It is also called "sea ice concentration". "Sea ice" means all ice floating in the sea which has formed from freezing sea water, rather than by other processes such as calving of land ice to form icebergs. + "Area fraction" is the fraction of a grid cell's horizontal area that has some characteristic of interest. It is evaluated as the area of interest divided by the grid cell area. It may be expressed as a fraction, a percentage, or any other dimensionless representation of a fraction. Sea ice area fraction is area of the sea surface occupied by sea ice. It is also called "sea ice concentration". "Sea ice" means all ice floating in the sea which has formed from freezing sea water, rather than by other processes such as calving of land ice to form icebergs. @@ -16828,11 +19087,11 @@ totals are summed to obtain the index. K - "Sea ice" means all ice floating in the sea which has formed from freezing sea water, rather than by other processes such as calving of land ice to form icebergs. The standard name sea_ice_basal_temperature means the temperature of the sea ice at its lower boundary. + "Sea ice" means all ice floating in the sea which has formed from freezing sea water, rather than by other processes such as calving of land ice to form icebergs. The standard name sea_ice_basal_temperature means the temperature of the sea ice at its lower boundary. It is strongly recommended that a variable with this standard name should have a units_metadata attribute, with one of the values "on-scale" or "difference", whichever is appropriate for the data, because it is essential to know whether the temperature is on-scale (meaning relative to the origin of the scale indicated by the units) or refers to temperature differences (implying that the origin of the temperature scale is irrelevant), in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). - 1 + A variable with the standard name of sea_ice_classification contains strings which indicate the character of the ice surface e.g. open_ice or first_year_ice. These strings have not yet been standardised. However, whenever possible, they should follow the terminology defined in the WMO Standard Nomenclature for Sea Ice Classification. Alternatively, the data variable may contain integers which can be translated to strings using flag_values and flag_meanings attributes. "Sea ice" means all ice floating in the sea which has formed from freezing sea water, rather than by other processes such as calving of land ice to form icebergs. @@ -16852,6 +19111,13 @@ totals are summed to obtain the index. The term sea_ice_extent means the total area of all grid cells in which the sea ice area fraction equals or exceeds a threshold, often chosen to be 15 per cent. The threshold must be specified by supplying a coordinate variable or scalar coordinate variable with the standard name of sea_ice_area_fraction. The horizontal domain over which sea ice extent is calculated is described by the associated coordinate variables and coordinate bounds or by a coordinate variable or scalar coordinate variable with the standard name of "region" supplied according to section 6.1.1 of the CF conventions.
"Sea ice" means all ice floating in the sea which has formed from freezing sea water, rather than by other processes such as calving of land ice to form icebergs. + + m + + + "Sea ice" means all ice floating in the sea which has formed from freezing sea water, rather than by other processes such as calving of land ice to form icebergs. An ice floe is a flat expanse of sea ice, generally taken to be less than 10 km across. ice_floe_diameter corresponds to the diameter of a circle with the same area as the ice floe. + + m @@ -16898,14 +19164,14 @@ totals are summed to obtain the index. K - The surface temperature is the (skin) temperature at the interface, not the bulk temperature of the medium above or below. "Sea ice surface temperature" is the temperature that exists at the interface of sea ice and an overlying medium which may be air or snow. In areas of snow covered sea ice, sea_ice_surface_temperature is not the same as the quantity with standard name surface_temperature. "Sea ice" means all ice floating in the sea which has formed from freezing sea water, rather than by other processes such as calving of land ice to form icebergs. + The surface temperature is the (skin) temperature at the interface, not the bulk temperature of the medium above or below. "Sea ice surface temperature" is the temperature that exists at the interface of sea ice and an overlying medium which may be air or snow. In areas of snow covered sea ice, sea_ice_surface_temperature is not the same as the quantity with standard name surface_temperature. "Sea ice" means all ice floating in the sea which has formed from freezing sea water, rather than by other processes such as calving of land ice to form icebergs. It is strongly recommended that a variable with this standard name should have a units_metadata attribute, with one of the values "on-scale" or "difference", whichever is appropriate for the data, because it is essential to know whether the temperature is on-scale (meaning relative to the origin of the scale indicated by the units) or refers to temperature differences (implying that the origin of the temperature scale is irrevelant), in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). K - Sea ice temperature is the bulk temperature of the sea ice, not the surface (skin) temperature. "Sea ice" means all ice floating in the sea which has formed from freezing sea water, rather than by other processes such as calving of land ice to form icebergs. + Sea ice temperature is the bulk temperature of the sea ice, not the surface (skin) temperature. "Sea ice" means all ice floating in the sea which has formed from freezing sea water, rather than by other processes such as calving of land ice to form icebergs. It is strongly recommended that a variable with this standard name should have a units_metadata attribute, with one of the values "on-scale" or "difference", whichever is appropriate for the data, because it is essential to know whether the temperature is on-scale (meaning relative to the origin of the scale indicated by the units) or refers to temperature differences (implying that the origin of the temperature scale is irrevelant), in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). @@ -17027,11 +19293,25 @@ totals are summed to obtain the index. Sea surface density is the density of sea water near the surface (including the part under sea-ice, if any). 
+ + Pa + + + The surface called "sea surface" means the upper boundary of the ocean. "Surface stress" means the shear stress (force per unit area) exerted at the surface. A downward stress is a downward flux of momentum. Over large bodies of water, surface stress can drive near-surface currents. "Downward" indicates a vector component which is positive when directed downward (negative upward). "Eastward" indicates a vector component which is positive when directed eastward (negative westward). "Downward eastward" indicates the ZX component of a tensor. A downward eastward stress is a downward flux of eastward momentum, which accelerates the lower medium eastward and the upper medium westward. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. The phrase "dissipation_of_sea_surface_waves" means the stress associated with sea surface waves dissipation processes such as whitecapping. + + + + Pa + + + The surface called "sea surface" means the upper boundary of the ocean. "Surface stress" means the shear stress (force per unit area) exerted at the surface. A downward stress is a downward flux of momentum. Over large bodies of water, surface stress can drive near-surface currents. "Downward" indicates a vector component which is positive when directed downward (negative upward). "Northward" indicates a vector component which is positive when directed northward (negative southward). "Downward northward" indicates the ZY component of a tensor. A downward northward stress is a downward flux of northward momentum, which accelerates the lower medium northward and the upper medium southward. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. The phrase "dissipation_of_sea_surface_waves" means the stress associated with sea surface waves dissipation processes such as whitecapping. + + K - The sea surface foundation temperature is the water temperature that is not influenced by a thermally stratified layer of diurnal temperature variability (either by daytime warming or nocturnal cooling). The foundation temperature is named to indicate that it is the temperature from which the growth of the diurnal thermocline develops each day, noting that on some occasions with a deep mixed layer there is no clear foundation temperature in the surface layer. In general, sea surface foundation temperature will be similar to a night time minimum or pre-dawn value at depths of between approximately 1 and 5 meters. In the absence of any diurnal signal, the foundation temperature is considered equivalent to the quantity with standard name sea_surface_subskin_temperature. The sea surface foundation temperature defines a level in the upper water column that varies in depth, space, and time depending on the local balance between thermal stratification and turbulent energy and is expected to change slowly over the course of a day. If possible, a data variable with the standard name sea_surface_foundation_temperature should be used with a scalar vertical coordinate variable to specify the depth of the foundation level. Sea surface foundation temperature is measured at the base of the diurnal thermocline or as close to the water surface as possible in the absence of thermal stratification.
Only in situ contact thermometry is able to measure the sea surface foundation temperature. Analysis procedures must be used to estimate sea surface foundation temperature value from radiometric satellite measurements of the quantities with standard names sea_surface_skin_temperature and sea_surface_subskin_temperature. Sea surface foundation temperature provides a connection with the historical concept of a "bulk" sea surface temperature considered representative of the oceanic mixed layer temperature that is typically represented by any sea temperature measurement within the upper ocean over a depth range of 1 to approximately 20 meters. The general term, "bulk" sea surface temperature, has the standard name sea_surface_temperature with no associated vertical coordinate axis. Sea surface foundation temperature provides a more precise, well defined quantity than "bulk" sea surface temperature and, consequently, is more representative of the mixed layer temperature. The temperature of sea water at a particular depth (other than the foundation level) should be reported using the standard name sea_water_temperature and, wherever possible, supplying a vertical coordinate axis or scalar coordinate variable. + The sea surface foundation temperature is the water temperature that is not influenced by a thermally stratified layer of diurnal temperature variability (either by daytime warming or nocturnal cooling). The foundation temperature is named to indicate that it is the temperature from which the growth of the diurnal thermocline develops each day, noting that on some occasions with a deep mixed layer there is no clear foundation temperature in the surface layer. In general, sea surface foundation temperature will be similar to a night time minimum or pre-dawn value at depths of between approximately 1 and 5 meters. In the absence of any diurnal signal, the foundation temperature is considered equivalent to the quantity with standard name sea_surface_subskin_temperature. The sea surface foundation temperature defines a level in the upper water column that varies in depth, space, and time depending on the local balance between thermal stratification and turbulent energy and is expected to change slowly over the course of a day. If possible, a data variable with the standard name sea_surface_foundation_temperature should be used with a scalar vertical coordinate variable to specify the depth of the foundation level. Sea surface foundation temperature is measured at the base of the diurnal thermocline or as close to the water surface as possible in the absence of thermal stratification. Only in situ contact thermometry is able to measure the sea surface foundation temperature. Analysis procedures must be used to estimate sea surface foundation temperature value from radiometric satellite measurements of the quantities with standard names sea_surface_skin_temperature and sea_surface_subskin_temperature. Sea surface foundation temperature provides a connection with the historical concept of a "bulk" sea surface temperature considered representative of the oceanic mixed layer temperature that is typically represented by any sea temperature measurement within the upper ocean over a depth range of 1 to approximately 20 meters. The general term, "bulk" sea surface temperature, has the standard name sea_surface_temperature with no associated vertical coordinate axis. 
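The foundation-temperature entry above asks that, where possible, the data variable carry a scalar vertical coordinate variable giving the depth of the foundation level. A minimal netCDF4-python sketch of that layout (the file name, variable names and the 5 m depth are assumptions for illustration):

    import netCDF4

    with netCDF4.Dataset("sstf_example.nc", "w") as ds:
        ds.createDimension("time", None)
        time = ds.createVariable("time", "f8", ("time",))
        time.units = "hours since 2000-01-01"
        # Scalar coordinate variable recording the foundation-level depth.
        depth = ds.createVariable("depth", "f4", ())
        depth.standard_name = "depth"
        depth.units = "m"
        depth.positive = "down"
        depth.assignValue(5.0)
        sstf = ds.createVariable("sstf", "f4", ("time",))
        sstf.standard_name = "sea_surface_foundation_temperature"
        sstf.units = "K"
        sstf.coordinates = "depth"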
Sea surface foundation temperature provides a more precise, well defined quantity than "bulk" sea surface temperature and, consequently, is more representative of the mixed layer temperature. The temperature of sea water at a particular depth (other than the foundation level) should be reported using the standard name sea_water_temperature and, wherever possible, supplying a vertical coordinate axis or scalar coordinate variable. It is strongly recommended that a variable with this standard name should have a units_metadata attribute, with one of the values "on-scale" or "difference", whichever is appropriate for the data, because it is essential to know whether the temperature is on-scale (meaning relative to the origin of the scale indicated by the units) or refers to temperature differences (implying that the origin of the temperature scale is irrelevant), in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). @@ -17101,7 +19381,7 @@ totals are summed to obtain the index. m - The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. Altimeter pulses tend to be more strongly refelected by the troughs of sea surface waves than by the crests leading to a bias in the measured sea surface height. This quantity is commonly known as "sea state bias". + Altimeter pulses tend to be more strongly reflected by the troughs of sea surface waves than by the crests, leading to a bias in the measured sea surface height. This quantity is commonly known as "sea state bias". "Sea surface height" is a time-varying quantity. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. @@ -17118,6 +19398,34 @@ totals are summed to obtain the index. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Air pressure at low frequency" means variations in air pressure with periods longer than 20 days. These give rise to corresponding variations in sea surface topography. The quantity sea_surface_height_correction_due_to_air_pressure_at_low_frequency is commonly called the "inverted barometer effect" and the correction should be applied by adding it to the quantity with standard name altimeter_range. Additional altimeter range corrections are given by the quantities with standard names altimeter_range_correction_due_to_wet_troposphere, altimeter_range_correction_due_to_dry_troposphere, altimeter_range_correction_due_to_ionosphere and sea_surface_height_correction_due_to_air_pressure_and_wind_at_high_frequency. + + m + + + Significant wave height is a statistic computed from wave measurements and corresponds to the average height of the highest one third of the waves, where the height is defined as the vertical distance from a wave trough to the following wave crest. Infragravity waves are waves occurring in the frequency range 0.04 to 0.004 s^-1 (wave periods of 25 to 250 seconds). + + + + 1 + + + Wave slope describes an aspect of sea surface wave geometry related to sea surface roughness.
Mean square slope describes a derivation over multiple waves within a sea-state, for example calculated from moments of the wave directional spectrum. The phrase "crosswave_slope" means that slope values are derived from vector components across (normal to) the axis from which waves are travelling. The primary directional axis along which wave energy associated with the slope calculation is travelling has the standard name sea_surface_mean_square_upwave_slope_direction. + + + + 1 + + + Wave slope describes an aspect of sea surface wave geometry related to sea surface roughness. Mean square slope describes a derivation over multiple waves within a sea-state, for example calculated from moments of the wave directional spectrum. The phrase "upwave_slope" means that slope values are derived from vector components along (parallel to) the axis from which waves are travelling. The primary directional axis along which wave energy associated with the slope calculation is travelling has the standard name sea_surface_mean_square_upwave_slope_direction. + + + + degree + + + Wave slope describes an aspect of sea surface wave geometry related to sea surface roughness. Mean square slope describes a derivation over multiple waves within a sea-state, for example calculated from moments of the wave directional spectrum. The phrase "upwave_slope_direction" is used to assign a primary directional axis along which wave energy associated with the slope calculation is travelling; "upwave" is equivalent to "from_direction" which is used in some standard names. + + degree @@ -17139,6 +19447,13 @@ totals are summed to obtain the index. The quantity with standard name sea_surface_primary_swell_wave_from_direction is the direction from which the most energetic swell waves are coming. Swell waves are waves on the ocean surface and are the low frequency portion of a bimodal wave frequency spectrum. The primary swell wave is the most energetic swell wave. The phrase "from_direction" is used in the construction X_from_direction and indicates the direction from which the velocity vector of X is coming. The direction is a bearing in the usual geographical sense, measured positive clockwise from due north. + + degree + + + The quantity with standard name sea_surface_primary_swell_wave_from_direction_at_variance_spectral_density_maximum is the direction from which the most energetic waves are coming in the primary swell wave component of a sea. Swell waves are waves on the ocean surface and are the low frequency portion of a bimodal wave frequency spectrum. The primary swell wave is the most energetic swell wave in the low frequency portion of a bimodal wave frequency spectrum. The spectral peak is the most energetic wave in the wave spectrum partition. The phrase "from_direction" is used in the construction X_from_direction and indicates the direction from which the velocity vector of X is coming. The direction is a bearing in the usual geographical sense, measured positive clockwise from due north. The wave directional spectrum can be written as a five dimensional function S(t,x,y,f,theta) where t is time, x and y are horizontal coordinates (such as longitude and latitude), f is frequency and theta is direction. S has the standard name sea_surface_wave_directional_variance_spectral_density. S can be integrated over direction to give S1= integral(S dtheta) and this quantity has the standard name sea_surface_wave_variance_spectral_density. + + s @@ -17188,6 +19503,13 @@ totals are summed to obtain the index. 
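The significant-height-of-infragravity-waves entry earlier in this hunk fixes the band at 0.004 to 0.04 s^-1 (periods of 25 to 250 s). A sketch of a band-limited significant height using the common spectral estimate Hm0 = 4 * sqrt(m0); the estimator and the toy spectrum are assumptions here, since the table only defines the statistic:

    import numpy as np

    f = np.linspace(0.001, 0.5, 2000)     # frequency (s-1)
    S1 = 1.0 / (1.0 + (f / 0.01) ** 4)    # synthetic variance spectral density (m2 s)

    band = (f >= 0.004) & (f <= 0.04)     # infragravity band from the definition
    m0 = np.trapz(S1[band], f[band])      # zeroth moment: band-limited variance (m2)
    hs_infragravity = 4.0 * np.sqrt(m0)   # spectral significant wave height (m)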
The quantity with standard name sea_surface_secondary_swell_wave_from_direction is the direction from which the second most energetic swell waves are coming. Swell waves are waves on the ocean surface and are the low frequency portion of a bimodal wave frequency spectrum. The secondary swell wave is the second most energetic wave in the low frequency portion of a bimodal wave frequency spectrum. The phrase "from_direction" is used in the construction X_from_direction and indicates the direction from which the velocity vector of X is coming. The direction is a bearing in the usual geographical sense, measured positive clockwise from due north. + + degree + + + The quantity with standard name sea_surface_secondary_swell_wave_from_direction_at_variance_spectral_density_maximum is the direction from which the most energetic waves are coming in the secondary swell wave component of a sea. Swell waves are waves on the ocean surface and are the low frequency portion of a bimodal wave frequency spectrum. The secondary swell wave is the second most energetic wave in the low frequency portion of a bimodal wave frequency spectrum. The spectral peak is the most energetic wave in the wave spectrum partition. The phrase "from_direction" is used in the construction X_from_direction and indicates the direction from which the velocity vector of X is coming. The direction is a bearing in the usual geographical sense, measured positive clockwise from due north. The wave directional spectrum can be written as a five dimensional function S(t,x,y,f,theta) where t is time, x and y are horizontal coordinates (such as longitude and latitude), f is frequency and theta is direction. S has the standard name sea_surface_wave_directional_variance_spectral_density. S can be integrated over direction to give S1= integral(S dtheta) and this quantity has the standard name sea_surface_wave_variance_spectral_density. + + s @@ -17213,14 +19535,21 @@ totals are summed to obtain the index. K - The sea surface skin temperature is the temperature measured by an infrared radiometer typically operating at wavelengths in the range 3.7 - 12 micrometers. It represents the temperature within the conductive diffusion-dominated sub-layer at a depth of approximately 10 - 20 micrometers below the air-sea interface. Measurements of this quantity are subject to a large potential diurnal cycle including cool skin layer effects (especially at night under clear skies and low wind speed conditions) and warm layer effects in the daytime. + The sea surface skin temperature is the temperature measured by an infrared radiometer typically operating at wavelengths in the range 3.7 - 12 micrometers. It represents the temperature within the conductive diffusion-dominated sub-layer at a depth of approximately 10 - 20 micrometers below the air-sea interface. Measurements of this quantity are subject to a large potential diurnal cycle including cool skin layer effects (especially at night under clear skies and low wind speed conditions) and warm layer effects in the daytime. It is strongly recommended that a variable with this standard name should have a units_metadata attribute, with one of the values "on-scale" or "difference", whichever is appropriate for the data, because it is essential to know whether the temperature is on-scale (meaning relative to the origin of the scale indicated by the units) or refers to temperature differences (implying that the origin of the temperature scale is irrelevant), in order to convert the units correctly (cf.
https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). K - The sea surface subskin temperature is the temperature at the base of the conductive laminar sub-layer of the ocean surface, that is, at a depth of approximately 1 - 1.5 millimeters below the air-sea interface. For practical purposes, this quantity can be well approximated to the measurement of surface temperature by a microwave radiometer operating in the 6 - 11 gigahertz frequency range, but the relationship is neither direct nor invariant to changing physical conditions or to the specific geometry of the microwave measurements. Measurements of this quantity are subject to a large potential diurnal cycle due to thermal stratification of the upper ocean layer in low wind speed high solar irradiance conditions. + The sea surface subskin temperature is the temperature at the base of the conductive laminar sub-layer of the ocean surface, that is, at a depth of approximately 1 - 1.5 millimeters below the air-sea interface. For practical purposes, this quantity can be well approximated to the measurement of surface temperature by a microwave radiometer operating in the 6 - 11 gigahertz frequency range, but the relationship is neither direct nor invariant to changing physical conditions or to the specific geometry of the microwave measurements. Measurements of this quantity are subject to a large potential diurnal cycle due to thermal stratification of the upper ocean layer in low wind speed high solar irradiance conditions. It is strongly recommended that a variable with this standard name should have a units_metadata attribute, with one of the values "on-scale" or "difference", whichever is appropriate for the data, because it is essential to know whether the temperature is on-scale (meaning relative to the origin of the scale indicated by the units) or refers to temperature differences (implying that the origin of the temperature scale is irrelevant), in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). + + + + degree + + + The quantity with standard name sea_surface_swell_wave_directional_spread is the directional width of the swell wave component of a sea. Swell waves are waves on the ocean surface and are the low frequency portion of a bimodal wave frequency spectrum. Directional spread is the (one-sided) directional width within a given sub-domain of the wave directional spectrum, S(t,x,y,f,theta) where t is time, x and y are horizontal coordinates (such as longitude and latitude), f is frequency and theta is direction. For a given mean wave (beam) direction the quantity approximates half the root mean square width about the beam axis, as derived either directly from circular moments or via the Fourier components of the wave directional spectrum. @@ -17230,6 +19559,13 @@ totals are summed to obtain the index. Swell waves are waves on the ocean surface and are the low frequency portion of a bimodal wave frequency spectrum. The phrase "from_direction" is used in the construction X_from_direction and indicates the direction from which the velocity vector of X is coming. The direction is a bearing in the usual geographical sense, measured positive clockwise from due north. + + degree + + + The quantity with standard name sea_surface_swell_wave_from_direction_at_variance_spectral_density_maximum is the direction from which the most energetic waves are coming in the swell wave component of a sea.
Swell waves are waves on the ocean surface and are the low frequency portion of a bimodal wave frequency spectrum. The spectral peak is the most energetic wave in the wave spectrum partition. The phrase "from_direction" is used in the construction X_from_direction and indicates the direction from which the velocity vector of X is coming. The direction is a bearing in the usual geographical sense, measured positive clockwise from due north. The swell wave directional spectrum can be written as a five dimensional function S(t,x,y,f,theta) where t is time, x and y are horizontal coordinates (such as longitude and latitude), f is frequency and theta is direction. S has the standard name sea_surface_wave_directional_variance_spectral_density. S can be integrated over direction to give S1= integral(S dtheta) and this quantity has the standard name sea_surface_wave_variance_spectral_density. + + s @@ -17265,6 +19601,13 @@ totals are summed to obtain the index. A period is an interval of time, or the time-period of an oscillation. Swell waves are waves on the ocean surface. + + s + + + The quantity with standard name sea_surface_swell_wave_period_at_variance_spectral_density_maximum is the period of the most energetic waves within the swell wave component of a sea. Swell waves are waves on the ocean surface and are the low frequency portion of a bimodal wave frequency spectrum. A period is an interval of time, or the time-period of an oscillation. Wave period is the interval of time between repeated features on the waveform such as crests, troughs or upward passes through the mean level. The phrase "wave_period_at_variance_spectral_density_maximum", sometimes called peak wave period, describes the period of the most energetic waves within a given sub-domain of the wave spectrum. + + m 105 @@ -17283,7 +19626,7 @@ totals are summed to obtain the index. K - Sea surface temperature is usually abbreviated as "SST". It is the temperature of sea water near the surface (including the part under sea-ice, if any). More specific terms, namely sea_surface_skin_temperature, sea_surface_subskin_temperature, and surface_termperature are available for the skin, subskin, and interface temperature. respectively. For the temperature of sea water at a particular depth or layer, a data variable of sea_water_temperature with a vertical coordinate axis should be used.’ + Sea surface temperature is usually abbreviated as "SST". It is the temperature of sea water near the surface (including the part under sea-ice, if any). More specific terms, namely sea_surface_skin_temperature, sea_surface_subskin_temperature, and surface_temperature are available for the skin, subskin, and interface temperature, respectively. For the temperature of sea water at a particular depth or layer, a data variable of sea_water_temperature with a vertical coordinate axis should be used. It is strongly recommended that a variable with this standard name should have a units_metadata attribute, with one of the values "on-scale" or "difference", whichever is appropriate for the data, because it is essential to know whether the temperature is on-scale (meaning relative to the origin of the scale indicated by the units) or refers to temperature differences (implying that the origin of the temperature scale is irrelevant), in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). @@ -17307,6 +19650,13 @@ totals are summed to obtain the index.
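Several *_at_variance_spectral_density_maximum entries in this section reduce the directional spectrum S(t,x,y,f,theta) to S1 = integral(S dtheta) and then locate the maximum of S1. A sketch of those reductions for one time and place, on a synthetic spectrum; taking the peak period as the reciprocal of the peak frequency is a standard identity the table does not spell out:

    import numpy as np

    f = np.linspace(0.03, 0.5, 120)                  # frequency (s-1)
    theta = np.radians(np.arange(0.0, 360.0, 10.0))  # direction (rad)
    S = np.random.default_rng(0).random((f.size, theta.size))  # synthetic, m2 s rad-1

    S1 = np.trapz(S, theta, axis=1)  # sea_surface_wave_variance_spectral_density (m2 s)
    ipk = int(np.argmax(S1))
    peak_energy = S1[ipk]            # ..._energy_at_variance_spectral_density_maximum
    peak_frequency = f[ipk]          # ..._frequency_at_variance_spectral_density_maximum
    peak_period = 1.0 / f[ipk]       # ..._period_at_variance_spectral_density_maximum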
The quantity with standard name sea_surface_tertiary_swell_wave_from_direction is the direction from which the third most energetic swell waves are coming. Swell waves are waves on the ocean surface and are the low frequency portion of a bimodal wave frequency spectrum. The tertiary swell wave is the third most energetic swell wave. The phrase "from_direction" is used in the construction X_from_direction and indicates the direction from which the velocity vector of X is coming. The direction is a bearing in the usual geographical sense, measured positive clockwise from due north. + + degree + + + The quantity with standard name sea_surface_tertiary_swell_wave_from_direction_at_variance_spectral_density_maximum is the direction from which the most energetic waves are coming in the tertiary swell wave component of a sea. Swell waves are waves on the ocean surface and are the low frequency portion of a bimodal wave frequency spectrum. The tertiary swell wave is the third most energetic swell wave in the low frequency portion of a bimodal wave frequency spectrum. The spectral peak is the most energetic wave in the wave spectrum partition. The phrase "from_direction" is used in the construction X_from_direction and indicates the direction from which the velocity vector of X is coming. The direction is a bearing in the usual geographical sense, measured positive clockwise from due north. The wave directional spectrum can be written as a five dimensional function S(t,x,y,f,theta) where t is time, x and y are horizontal coordinates (such as longitude and latitude), f is frequency and theta is direction. S has the standard name sea_surface_wave_directional_variance_spectral_density. S can be integrated over direction to give S1= integral(S dtheta) and this quantity has the standard name sea_surface_wave_variance_spectral_density. + + s @@ -17335,6 +19685,13 @@ totals are summed to obtain the index. Directional spread is the (one-sided) directional width within a given sub-domain of the wave directional spectrum, S(t,x,y,f,theta) where t is time, x and y are horizontal coordinates (such as longitude and latitude), f is frequency and theta is direction. For a given mean wave (beam) direction the quantity approximates half the root mean square width about the beam axis, as derived either directly from circular moments or via the Fourier components of the wave directional spectrum. + + degree + + + The quantity with standard name sea_surface_wave_directional_spread_at_variance_spectral_density_maximum is the directional spread of the most energetic waves. Directional spread is the (one-sided) directional width within a given sub-domain of the wave directional spectrum, S(t,x,y,f,theta) where t is time, x and y are horizontal coordinates (such as longitude and latitude), f is frequency and theta is direction. For a given mean wave (beam) direction the quantity approximates half the root mean square width about the beam axis, as derived either directly from circular moments or via the Fourier components of the wave directional spectrum. + + m2 s rad-1 @@ -17349,6 +19706,20 @@ totals are summed to obtain the index. The wave directional spectrum can be written as a five dimensional function S(t,x,y,f,theta) where t is time, x and y are horizontal coordinates (such as longitude and latitude), f is frequency and theta is direction. S has the standard name sea_surface_wave_directional_variance_spectral_density. 
S can be integrated over direction to give S1= integral(S dtheta) and this quantity has the standard name sea_surface_wave_variance_spectral_density. The quantity with standard name sea_surface_wave_energy_at_variance_spectral_density_maximum, sometimes called peak wave energy, is the maximum value of the variance spectral density (max(S1)). + + W m-1 + + + Wave energy flux, or wave power, is the average rate of transfer of wave energy through a vertical plane of unit width perpendicular to the direction of wave propagation. It should be understood as omnidirectional, or as the sum of all wave power components regardless of direction. In deep water conditions, the wave energy flux can be obtained from the water density, the significant wave height and the energy period. + + + + s-1 + + + Frequency is the number of oscillations of a wave per unit time. The sea_surface_wave_frequency_at_variance_spectral_density_maximum is the frequency of the most energetic waves in the total wave spectrum at a specific location. The wave directional spectrum can be written as a five dimensional function S(t,x,y,f,theta) where t is time, x and y are horizontal coordinates (such as longitude and latitude), f is frequency and theta is direction. S has the standard name sea_surface_wave_directional_variance_spectral_density. S can be integrated over direction to give S1= integral(S dtheta) and this quantity has the standard name sea_surface_wave_variance_spectral_density. + + degree @@ -17398,6 +19769,13 @@ totals are summed to obtain the index. The trough is the lowest point of a wave. Trough depth is the vertical distance between the trough and the calm sea surface. Maximum trough depth is the maximum value measured during the observation period. + + degree + + + The wave direction in each frequency band, calculated from the first-order components of the wave directional spectrum. The full directional wave spectrum is described as a Fourier series: S = a0/2 + a1cos(theta) + b1sin(theta) + a2cos(2theta) + b2sin(2theta). The Fourier coefficients a1, b1, a2, & b2 can be converted to polar coordinates as follows: R1 = (SQRT(a1a1+b1b1))/a0, R2 = (SQRT(a2a2+b2b2))/a0, ALPHA1 = 270.0-ARCTAN(b1,a1), ALPHA2 = 270.0-(0.5*ARCTAN(b2,a2)+{0 or 180, whichever minimizes the difference between ALPHA1 and ALPHA2}). ALPHA1 is the mean wave direction, which is determined from the first-order Fourier coefficients. This spectral parameter is a separate quantity from the bulk parameter (MWDIR), which has the standard name sea_surface_wave_from_direction_at_variance_spectral_density_maximum. The phrase "from_direction" is used in the construction X_from_direction and indicates the direction from which the velocity vector of X is coming. The direction is a bearing in the usual geographical sense, measured positive clockwise from due north. + + m @@ -17447,11 +19825,46 @@ totals are summed to obtain the index. Wave mean period is the mean period measured over the observation duration. The quantity with standard name sea_surface_wave_mean_period_of_highest_tenth is the mean period of the highest one-tenth of waves during the observation duration. A period is an interval of time, or the time-period of an oscillation. Wave period is the interval of time between repeated features on the waveform such as crests, troughs or upward passes through the mean level. + + 1 + + + Wave slope describes an aspect of sea surface wave geometry related to sea surface roughness.
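The ALPHA1 entry above gives the per-band mean wave direction in closed form. A direct numpy transcription (wrapping the bearing into [0, 360) is an added convenience, not part of the quoted formula):

    import numpy as np

    def r1(a0, a1, b1):
        # R1 = (SQRT(a1*a1 + b1*b1)) / a0
        return np.sqrt(a1 * a1 + b1 * b1) / a0

    def alpha1(a1, b1):
        # ALPHA1 = 270.0 - ARCTAN(b1, a1), with the two-argument
        # arctangent expressed in degrees.
        return (270.0 - np.degrees(np.arctan2(b1, a1))) % 360.0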
Mean square slope describes a derivation over multiple waves within a sea-state, for example calculated from moments of the wave directional spectrum. + + + + 1 + + + Wave slope describes an aspect of sea surface wave geometry related to sea surface roughness. Mean square slope describes a derivation over multiple waves within a sea-state, for example calculated from moments of the wave directional spectrum. The phrase "x_slope" indicates that slope values are derived from vector components along the grid x-axis. + + + + 1 + + + Wave slope describes an aspect of sea surface wave geometry related to sea surface roughness. Mean square slope describes a derivation over multiple waves within a sea-state, for example calculated from moments of the wave directional spectrum. The phrase "y_slope" indicates that slope values are derived from vector components along the grid y-axis. + + + + m + + + The wave directional spectrum can be written as a five dimensional function S(t,x,y,k,theta) where t is time, x and y are horizontal coordinates (such as longitude and latitude), k is wavenumber and theta is direction. S has the standard name sea_surface_wave_directional_variance_spectral_density. S can be integrated over direction to give S1= integral(S dtheta) and this quantity has the standard name sea_surface_wave_variance_spectral_density. Wavenumber is the number of oscillations of a wave per unit distance. Wavenumber moments, M(n) of S1 can then be calculated as follows: M(n) = integral(S1 k^n dk), where k^n is k to the power of n. The inverse wave wavenumber, k(m-1), is calculated as the ratio M(-1)/M(0). The wavelength is the horizontal distance between repeated features on the waveform such as crests, troughs or upward passes through the mean level. + + + + m-1 + + + The wave directional spectrum can be written as a five dimensional function S(t,x,y,k,theta) where t is time, x and y are horizontal coordinates (such as longitude and latitude), k is wavenumber and theta is direction. S has the standard name sea_surface_wave_directional_variance_spectral_density. S can be integrated over direction to give S1= integral(S dtheta) and this quantity has the standard name sea_surface_wave_variance_spectral_density. Wavenumber is the number of oscillations of a wave per unit distance. Wavenumber moments, M(n) of S1 can then be calculated as follows: M(n) = integral(S1 k^n dk), where k^n is k to the power of n. The mean wavenumber, k(1), is calculated as the ratio M(1)/M(0). + + s - A period is an interval of time, or the time-period of an oscillation. The sea_surface_wave_period_at_variance_spectral_density_maximum, sometimes called peak wave period, is the period of the most energetic waves in the total wave spectrum at a specific location. + A period is an interval of time, or the time-period of an oscillation. Wave period is the interval of time between repeated features on the waveform such as crests, troughs or upward passes through the mean level. The sea_surface_wave_period_at_variance_spectral_density_maximum, sometimes called peak wave period, is the period of the most energetic waves in the total wave spectrum at a specific location. The wave directional spectrum can be written as a five dimensional function S(t,x,y,f,theta) where t is time, x and y are horizontal coordinates (such as longitude and latitude), f is frequency and theta is direction. S has the standard name sea_surface_wave_directional_variance_spectral_density. 
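The wavenumber entries above define the moments M(n) = integral(S1 k^n dk) and take the ratios M(1)/M(0) and M(-1)/M(0). A sketch on a synthetic wavenumber spectrum:

    import numpy as np

    def moment(S1, k, n):
        # M(n) = integral(S1 * k**n dk)
        return np.trapz(S1 * k ** n, k)

    k = np.linspace(0.005, 0.5, 400)          # wavenumber (m-1)
    S1 = np.exp(-(((k - 0.05) / 0.02) ** 2))  # synthetic spectrum

    mean_wavenumber = moment(S1, k, 1) / moment(S1, k, 0)      # k(1), unit m-1
    inverse_wavenumber = moment(S1, k, -1) / moment(S1, k, 0)  # k(m-1), unit m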
S can be integrated over direction to give S1= integral(S dtheta) and this quantity has the standard name sea_surface_wave_variance_spectral_density. @@ -17461,6 +19874,13 @@ totals are summed to obtain the index. Wave period of the highest wave is the period determined from wave crests corresponding to the greatest vertical distance above mean level during the observation period. A period is an interval of time, or the time-period of an oscillation. Wave period is the interval of time between repeated features on the waveform such as crests, troughs or upward passes through the mean level. + + degree + + + The wave direction in each frequency band, calculated from the second-order components of the wave directional spectrum. Since there is an ambiguity of 180 degrees in the calculation of Alpha2 (i.e. 90 degrees and 270 degrees result in equivalent spectra), the value closer to Alpha1 is selected. The full directional wave spectrum is described as a Fourier series: S = a0/2 + a1cos(theta) + b1sin(theta) + a2cos(2theta) + b2sin(2theta). The Fourier coefficients a1, b1, a2, & b2 can be converted to polar coordinates as follows: R1 = (SQRT(a1a1+b1b1))/a0, R2 = (SQRT(a2a2+b2b2))/a0, ALPHA1 = 270.0-ARCTAN(b1,a1), ALPHA2 = 270.0-(0.5*ARCTAN(b2,a2)+{0 or 180, whichever minimizes the difference between ALPHA1 and ALPHA2}). ALPHA2 is the principal wave direction, which is determined from the second-order Fourier coefficients. This spectral parameter is a separate quantity from the bulk parameter (MWDIR), which has the standard name sea_surface_wave_from_direction_at_variance_spectral_density_maximum. The phrase "from_direction" is used in the construction X_from_direction and indicates the direction from which the velocity vector of X is coming. The direction is a bearing in the usual geographical sense, measured positive clockwise from due north. + + m 100 @@ -17475,6 +19895,34 @@ totals are summed to obtain the index. Significant wave period is a statistic computed from wave measurements and corresponds to the mean wave period of the highest one third of the waves. A period is an interval of time, or the time-period of an oscillation. Wave period is the interval of time between repeated features on the waveform such as crests, troughs or upward passes through the mean level. + + m s-1 + + + A velocity is a vector quantity. "Eastward" indicates a vector component which is positive when directed eastward (negative westward). The Stokes drift velocity is the average velocity when following a specific fluid parcel as it travels with the fluid flow. For instance, a particle floating at the free surface of water waves experiences a net Stokes drift velocity in the direction of wave propagation. + + + + m s-1 + + + A velocity is a vector quantity. "Northward" indicates a vector component which is positive when directed northward (negative southward). The Stokes drift velocity is the average velocity when following a specific fluid parcel as it travels with the fluid flow. For instance, a particle floating at the free surface of water waves experiences a net Stokes drift velocity in the direction of wave propagation. + + + + m s-1 + + + The Stokes drift velocity is the average velocity when following a specific fluid parcel as it travels with the fluid flow. For instance, a particle floating at the free surface of water waves experiences a net Stokes drift velocity in the direction of wave propagation. Speed is the magnitude of velocity.
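The Stokes drift entries here and just below split one vector quantity into eastward and northward components plus a speed and a to_direction. Recovering the scalar quantities from the components (the numbers are illustrative):

    import numpy as np

    u = 0.08  # eastward Stokes drift component (m s-1)
    v = 0.03  # northward Stokes drift component (m s-1)

    speed = np.hypot(u, v)  # magnitude of the Stokes drift velocity
    # Bearing the drift is headed towards, measured clockwise from due north:
    to_direction = np.degrees(np.arctan2(u, v)) % 360.0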
+ + + + degree + + + The Stokes drift velocity is the average velocity when following a specific fluid parcel as it travels with the fluid flow. For instance, a particle floating at the free surface of water waves experiences a net Stokes drift velocity in the direction of wave propagation. The phrase "to_direction" is used in the construction X_to_direction and indicates the direction towards which the velocity vector of X is headed. The direction is a bearing in the usual geographical sense, measured positive clockwise from due north. + + m s-1 @@ -17503,6 +19951,27 @@ totals are summed to obtain the index. Sea surface wave variance spectral density is the variance of wave amplitude within a range of wave frequency. + + Pa + + + "Sea surface wave radiation stress" describes the excess momentum flux caused by sea surface waves. Radiation stresses behave as a second-order tensor. "xx" indicates the component of the tensor along the grid x-axis. + + + + Pa + + + "Sea surface wave radiation stress" describes the excess momentum flux caused by sea surface waves. Radiation stresses behave as a second-order tensor. "xy" indicates the lateral contributions to the x- and y-components of the tensor. + + + + Pa + + + "Sea surface wave radiation stress" describes the excess momentum flux caused by sea surface waves. Radiation stresses behave as a second-order tensor. "yy" indicates the component of the tensor along the grid y-axis. + + degree @@ -17524,6 +19993,13 @@ totals are summed to obtain the index. Wind waves are waves on the ocean surface and are the high frequency portion of a bimodal wave frequency spectrum. The phrase "from_direction" is used in the construction X_from_direction and indicates the direction from which the velocity vector of X is coming. The direction is a bearing in the usual geographical sense, measured positive clockwise from due north. + + degree + + + The quantity with standard name sea_surface_wind_wave_from_direction_at_variance_spectral_density_maximum is the direction from which the most energetic waves are coming in the wind wave component of a sea. Wind waves are waves on the ocean surface and are the high frequency portion of a bimodal wave frequency spectrum. The spectral peak is the most energetic wave in the wave spectrum partition. The phrase "from_direction" is used in the construction X_from_direction and indicates the direction from which the velocity vector of X is coming. The direction is a bearing in the usual geographical sense, measured positive clockwise from due north. The wave directional spectrum can be written as a five dimensional function S(t,x,y,f,theta) where t is time, x and y are horizontal coordinates (such as longitude and latitude), f is frequency and theta is direction. S has the standard name sea_surface_wave_directional_variance_spectral_density. S can be integrated over direction to give S1= integral(S dtheta) and this quantity has the standard name sea_surface_wave_variance_spectral_density. + + s @@ -17591,14 +20067,14 @@ totals are summed to obtain the index. degree_C - The quantity with standard name sea_water_added_conservative_temperature is a passive tracer in an ocean model whose surface flux does not come from the atmosphere but is imposed externally upon the simulated climate system. The surface flux is expressed as a heat flux and converted to a passive tracer increment as if it were a heat flux being added to conservative temperature.
The passive tracer is transported within the ocean as if it were conservative temperature. The passive tracer is zero in the control climate of the model. The passive tracer records added heat, as described for the CMIP6 FAFMIP experiment (doi:10.5194/gmd-9-3993-2016), following earlier ideas. Conservative Temperature is defined as part of the Thermodynamic Equation of Seawater 2010 (TEOS-10) which was adopted in 2010 by the International Oceanographic Commission (IOC). Conservative Temperature is specific potential enthalpy (which has the standard name sea_water_specific_potential_enthalpy) divided by a fixed value of the specific heat capacity of sea water, namely cp_0 = 3991.86795711963 J kg-1 K-1. Conservative Temperature is a more accurate measure of the "heat content" of sea water, by a factor of one hundred, than is potential temperature. Because of this, it can be regarded as being proportional to the heat content of sea water per unit mass. Reference: www.teos-10.org; McDougall, 2003 doi: 10.1175/1520-0485(2003)033<0945:PEACOV>2.0.CO;2. + The quantity with standard name sea_water_added_conservative_temperature is a passive tracer in an ocean model whose surface flux does not come from the atmosphere but is imposed externally upon the simulated climate system. The surface flux is expressed as a heat flux and converted to a passive tracer increment as if it were a heat flux being added to conservative temperature. The passive tracer is transported within the ocean as if it were conservative temperature. The passive tracer is zero in the control climate of the model. The passive tracer records added heat, as described for the CMIP6 FAFMIP experiment (doi:10.5194/gmd-9-3993-2016), following earlier ideas. Conservative Temperature is defined as part of the Thermodynamic Equation of Seawater 2010 (TEOS-10) which was adopted in 2010 by the Intergovernmental Oceanographic Commission (IOC). Conservative Temperature is specific potential enthalpy (which has the standard name sea_water_specific_potential_enthalpy) divided by a fixed value of the specific heat capacity of sea water, namely cp_0 = 3991.86795711963 J kg-1 K-1. Conservative Temperature is a more accurate measure of the "heat content" of sea water, by a factor of one hundred, than is potential temperature. Because of this, it can be regarded as being proportional to the heat content of sea water per unit mass. Reference: www.teos-10.org; McDougall, 2003 doi: 10.1175/1520-0485(2003)033<0945:PEACOV>2.0.CO;2. It is strongly recommended that a variable with this standard name should have the attribute units_metadata="temperature: difference", meaning that it refers to temperature differences and implying that the origin of the temperature scale is irrelevant, because it is essential to know whether a temperature is on-scale or a difference in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). + + degree_C + + - The quantity with standard name sea_water_added_potential_temperature is a passive tracer in an ocean model whose surface flux does not come from the atmosphere but is imposed externally upon the simulated climate system. The surface flux is expressed as a heat flux and converted to a passive tracer increment as if it were a heat flux being added to potential temperature. The passive tracer is transported within the ocean as if it were potential temperature. The passive tracer is zero in the control climate of the model.
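The entry above recommends the attribute units_metadata="temperature: difference" for these heat-tracer quantities. A minimal netCDF4-python sketch of writing it (the file and variable names are illustrative):

    import netCDF4

    with netCDF4.Dataset("fafmip_added_heat.nc", "w") as ds:
        ds.createDimension("time", 1)
        var = ds.createVariable("t_added", "f4", ("time",))
        var.standard_name = "sea_water_added_conservative_temperature"
        var.units = "degC"
        # A difference tracer: the origin of the temperature scale is irrelevant.
        var.units_metadata = "temperature: difference"
        var[:] = [0.0]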
The passive tracer records added heat, as described for the CMIP6 FAFMIP experiment (doi:10.5194/gmd-9-3993-2016), following earlier ideas. Potential temperature is the temperature a parcel of air or sea water would have if moved adiabatically to sea level pressure. + The quantity with standard name sea_water_added_potential_temperature is a passive tracer in an ocean model whose surface flux does not come from the atmosphere but is imposed externally upon the simulated climate system. The surface flux is expressed as a heat flux and converted to a passive tracer increment as if it were a heat flux being added to potential temperature. The passive tracer is transported within the ocean as if it were potential temperature. The passive tracer is zero in the control climate of the model. The passive tracer records added heat, as described for the CMIP6 FAFMIP experiment (doi:10.5194/gmd-9-3993-2016), following earlier ideas. Potential temperature is the temperature a parcel of air or sea water would have if moved adiabatically to sea level pressure. It is strongly recommended that a variable with this standard name should have the attribute units_metadata="temperature: difference", meaning that it refers to temperature differences and implying that the origin of the temperature scale is irrelevant, because it is essential to know whether a temperature is on-scale or a difference in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). @@ -17622,11 +20098,18 @@ totals are summed to obtain the index. sea_water_alkalinity_expressed_as_mole_equivalent is the total alkalinity equivalent concentration (including carbonate, nitrogen, silicate, and borate components). In ocean biogeochemistry models, a "natural analogue" is used to simulate the effect on a modelled variable of imposing preindustrial atmospheric carbon dioxide concentrations, even when the model as a whole may be subjected to varying forcings. + + mol kg-1 + + + The standard name sea_water_alkalinity_per_unit_mass_expressed_as_mole_equivalent is the total alkalinity equivalent concentration (including carbonate, nitrogen, silicate, and borate components) expressed as the number of moles of alkalinity per unit mass of seawater. The phrase "expressed_as" is used in the construction "A_expressed_as_B", where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. The equivalent term in the NERC P01 Parameter Usage Vocabulary may be found at http://vocab.nerc.ac.uk/collection/P01/current/MDMAP014/1/. + + K - Conservative Temperature is defined as part of the Thermodynamic Equation of Seawater 2010 (TEOS-10) which was adopted in 2010 by the International Oceanographic Commission (IOC). Conservative Temperature is specific potential enthalpy (which has the standard name sea_water_specific_potential_enthalpy) divided by a fixed value of the specific heat capacity of sea water, namely cp_0 = 3991.86795711963 J kg-1 K-1. Conservative Temperature is a more accurate measure of the "heat content" of sea water, by a factor of one hundred, than is potential temperature. Because of this, it can be regarded as being proportional to the heat content of sea water per unit mass. Reference: www.teos-10.org; McDougall, 2003 doi: 10.1175/1520-0485(2003)033<0945:PEACOV>2.0.CO;2. 
+ Conservative Temperature is defined as part of the Thermodynamic Equation of Seawater 2010 (TEOS-10) which was adopted in 2010 by the Intergovernmental Oceanographic Commission (IOC). Conservative Temperature is specific potential enthalpy (which has the standard name sea_water_specific_potential_enthalpy) divided by a fixed value of the specific heat capacity of sea water, namely cp_0 = 3991.86795711963 J kg-1 K-1. Conservative Temperature is a more accurate measure of the "heat content" of sea water, by a factor of one hundred, than is potential temperature. Because of this, it can be regarded as being proportional to the heat content of sea water per unit mass. Reference: www.teos-10.org; McDougall, 2003 doi: 10.1175/1520-0485(2003)033<0945:PEACOV>2.0.CO;2. It is strongly recommended that a variable with this standard name should have a units_metadata attribute, with one of the values "on-scale" or "difference", whichever is appropriate for the data, because it is essential to know whether the temperature is on-scale (meaning relative to the origin of the scale indicated by the units) or refers to temperature differences (implying that the origin of the temperature scale is irrelevant), in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). @@ -17650,11 +20133,11 @@ totals are summed to obtain the index. - - degree + + S m-1 - The phrase "from_direction" is used in the construction X_from_direction and indicates the direction from which the velocity vector of X is coming. The direction is a bearing in the usual geographical sense, measured positive clockwise from due north. + The electrical conductivity of sea water in a sample measured at a defined reference temperature. The reference temperature should be recorded in a scalar coordinate variable, or a coordinate variable with a single dimension of size one, and the standard name of temperature_of_analysis_of_sea_water. This quantity is sometimes called 'specific conductivity' when the reference temperature is 25 degrees Celsius. @@ -17717,21 +20200,21 @@ totals are summed to obtain the index. kg m-3 - Potential density is the density a parcel of air or sea water would have if moved adiabatically to a reference pressure, by default assumed to be sea level pressure. For sea water potential density, if 1000 kg m-3 is subtracted, the standard name sea_water_sigma_theta should be chosen instead. + Sea water potential density is the density a parcel of sea water would have if moved adiabatically to a reference pressure, by default assumed to be sea level pressure. To specify the reference pressure to which the quantity applies, provide a scalar coordinate variable with standard name reference_pressure. The density of a substance is its mass per unit volume. For sea water potential density, if 1000 kg m-3 is subtracted, the standard name sea_water_sigma_theta should be chosen instead. + + K - Potential temperature is the temperature a parcel of air or sea water would have if moved adiabatically to sea level pressure. + Sea water potential temperature is the temperature a parcel of sea water would have if moved adiabatically to sea level pressure.
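The conservative-temperature entry above quotes the defining relation: Conservative Temperature is specific potential enthalpy divided by the fixed cp_0. A one-line check of that ratio (full TEOS-10 conversions live in the gsw package; this only restates the quoted constant):

    CP0 = 3991.86795711963  # J kg-1 K-1, the fixed value quoted above

    def conservative_temperature(specific_potential_enthalpy):
        # Conservative Temperature (degree_C) from specific potential
        # enthalpy (J kg-1), per the TEOS-10 relation quoted in the table.
        return specific_potential_enthalpy / CP0

    print(conservative_temperature(39918.6795711963))  # 10.0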
It is strongly recommended that a variable with this standard name should have a units_metadata attribute, with one of the values "on-scale" or "difference", whichever is appropriate for the data, because it is essential to know whether the temperature is on-scale (meaning relative to the origin of the scale indicated by the units) or refers to temperature differences (implying that the origin of the temperature scale is irrelevant), in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). + + K - Potential temperature is the temperature a parcel of air or sea water would have if moved adiabatically to sea level pressure. The potential temperature at the sea floor is that adjacent to the ocean bottom, which would be the deepest grid cell in an ocean model. + Potential temperature is the temperature a parcel of air or sea water would have if moved adiabatically to sea level pressure. The potential temperature at the sea floor is that adjacent to the ocean bottom, which would be the deepest grid cell in an ocean model and within the benthic boundary layer for measurements. It is strongly recommended that a variable with this standard name should have a units_metadata attribute, with one of the values "on-scale" or "difference", whichever is appropriate for the data, because it is essential to know whether the temperature is on-scale (meaning relative to the origin of the scale indicated by the units) or refers to temperature differences (implying that the origin of the temperature scale is irrelevant), in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). @@ -17745,8 +20228,21 @@ totals are summed to obtain the index. 1 - Practical Salinity, S_P, is a determination of the salinity of sea water, based on its electrical conductance. The measured conductance, corrected for temperature and pressure, is compared to the conductance of a standard potassium chloride solution, producing a value on the Practical Salinity Scale of 1978 (PSS-78). This name should not be used to describe salinity observations made before 1978, or ones not based on conductance measurements. Conversion of Practical Salinity to other precisely defined salinity measures should use the appropriate formulas specified by TEOS-10. Other standard names for precisely defined salinity quantities are sea_water_absolute_salinity (S_A); sea_water_preformed_salinity (S_*), sea_water_reference_salinity (S_R); sea_water_cox_salinity (S_C), used for salinity observations between 1967 and 1977; and sea_water_knudsen_salinity (S_K), used for salinity observations between 1901 and 1966. Salinity quantities that do not match any of the precise definitions shoul - d be given the more general standard name of sea_water_salinity. Reference: www.teos-10.org; Lewis, 1980 doi:10.1109/JOE.1980.1145448. + Practical Salinity, S_P, is a determination of the salinity of sea water, based on its electrical conductance. The measured conductance, corrected for temperature and pressure, is compared to the conductance of a standard potassium chloride solution, producing a value on the Practical Salinity Scale of 1978 (PSS-78). This name should not be used to describe salinity observations made before 1978, or ones not based on conductance measurements. Conversion of Practical Salinity to other precisely defined salinity measures should use the appropriate formulas specified by TEOS-10.
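The practical-salinity entry above defers conversions to TEOS-10; the sea_water_salinity entries later in this hunk quote the historical-scale relations explicitly. A direct transcription:

    def practical_from_knudsen(s_k):
        # S_P = (S_K - 0.03) * (1.80655 / 1.805), per PSS-78
        return (s_k - 0.03) * (1.80655 / 1.805)

    def practical_from_cox(s_c):
        # S_P = S_C; accuracy depends on whether chlorinity or
        # conductivity was used to determine S_C.
        return s_c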
Other standard names for precisely defined salinity quantities are sea_water_absolute_salinity (S_A); sea_water_preformed_salinity (S_*), sea_water_reference_salinity (S_R); sea_water_cox_salinity (S_C), used for salinity observations between 1967 and 1977; and sea_water_knudsen_salinity (S_K), used for salinity observations between 1901 and 1966. Salinity quantities that do not match any of the precise definitions should be given the more general standard name of sea_water_salinity. Reference: www.teos-10.org; Lewis, 1980 doi:10.1109/JOE.1980.1145448. + + + + 1 + + + The practical salinity at the sea floor is that adjacent to the ocean bottom, which would be the deepest grid cell in an ocean model and within the benthic boundary layer for measurements. Practical Salinity, S_P, is a determination of the salinity of sea water, based on its electrical conductance. The measured conductance, corrected for temperature and pressure, is compared to the conductance of a standard potassium chloride solution, producing a value on the Practical Salinity Scale of 1978 (PSS-78). This name should not be used to describe salinity observations made before 1978, or ones not based on conductance measurements. Conversion of Practical Salinity to other precisely defined salinity measures should use the appropriate formulas specified by TEOS-10. Salinity quantities that do not match any of the precise definitions should be given the more general standard name of sea_water_salinity_at_sea_floor. Reference: www.teos-10.org; Lewis, 1980 doi:10.1109/JOE.1980.1145448. + + + + mol m-3 + + + "Mole concentration" means the number of moles per unit volume, also called "molarity", and is used in the construction "mole_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". "Alkalinity" refers to total alkalinity equivalent concentration, including carbonate, borate, phosphorus, silicon, and nitrogen components. The subduction and subsequent transport of surface water carry into the interior ocean considerable quantities of alkalinity, which is entirely independent of biological activity (such as organic decomposition and oxidation) after the water leaves the sea surface. Such alkalinity is termed “preformed” alkalinity (Redfield, 1942). @@ -17774,7 +20270,7 @@ totals are summed to obtain the index. dbar - The surface called "surface" means the lower boundary of the atmosphere. "Sea water pressure" is the pressure that exists in the medium of sea water. It includes the pressure due to overlying sea water, sea ice, air and any other medium that may be present. + The phrase "sea water surface" means the upper boundary of the liquid portion of an ocean or sea, including the boundary to floating ice if present. "Sea water pressure" is the pressure that exists in the medium of sea water. It includes the pressure due to overlying sea water, sea ice, air and any other medium that may be present. @@ -17788,14 +20284,14 @@ totals are summed to obtain the index. degree_C - The quantity with standard name sea_water_redistributed_conservative_temperature is a passive tracer in an ocean model which is subject to an externally imposed perturbative surface heat flux. The passive tracer is initialised to the conservative temperature in the control climate before the perturbation is imposed.
Its surface flux is the heat flux from the atmosphere, not including the imposed perturbation, and is converted to a passive tracer increment as if it were being added to conservative temperature. The passive tracer is transported within the ocean as if it were conservative temperature. The passive tracer records redistributed heat, as described for the CMIP6 FAFMIP experiment (doi:10.5194/gmd-9-3993-2016), following earlier ideas. Conservative Temperature is defined as part of the Thermodynamic Equation of Seawater 2010 (TEOS-10) which was adopted in 2010 by the International Oceanographic Commission (IOC). Conservative Temperature is specific potential enthalpy (which has the standard name sea_water_specific_potential_enthalpy) divided by a fixed value of the specific heat capacity of sea water, namely cp_0 = 3991.86795711963 J kg-1 K-1. Conservative Temperature is a more accurate measure of the "heat content" of sea water, by a factor of one hundred, than is potential temperature. Because of this, it can be regarded as being proportional to the heat content of sea water per unit mass. Reference: www.teos-10.org; McDougall, 2003 doi: 10.1175/1520-0485(2003)033<0945:PEACOV>2.0.CO;2. + The quantity with standard name sea_water_redistributed_conservative_temperature is a passive tracer in an ocean model which is subject to an externally imposed perturbative surface heat flux. The passive tracer is initialised to the conservative temperature in the control climate before the perturbation is imposed. Its surface flux is the heat flux from the atmosphere, not including the imposed perturbation, and is converted to a passive tracer increment as if it were being added to conservative temperature. The passive tracer is transported within the ocean as if it were conservative temperature. The passive tracer records redistributed heat, as described for the CMIP6 FAFMIP experiment (doi:10.5194/gmd-9-3993-2016), following earlier ideas. Conservative Temperature is defined as part of the Thermodynamic Equation of Seawater 2010 (TEOS-10) which was adopted in 2010 by the Intergovernmental Oceanographic Commission (IOC). Conservative Temperature is specific potential enthalpy (which has the standard name sea_water_specific_potential_enthalpy) divided by a fixed value of the specific heat capacity of sea water, namely cp_0 = 3991.86795711963 J kg-1 K-1. Conservative Temperature is a more accurate measure of the "heat content" of sea water, by a factor of one hundred, than is potential temperature. Because of this, it can be regarded as being proportional to the heat content of sea water per unit mass. Reference: www.teos-10.org; McDougall, 2003 doi: 10.1175/1520-0485(2003)033<0945:PEACOV>2.0.CO;2. It is strongly recommended that a variable with this standard name should have a units_metadata attribute, with one of the values "on-scale" or "difference", whichever is appropriate for the data, because it is essential to know whether the temperature is on-scale (meaning relative to the origin of the scale indicated by the units) or refers to temperature differences (implying that the origin of the temperature scale is irrelevant), in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). + + degree_C + + - The quantity with standard name sea_water_redistributed_potential_temperature is a passive tracer in an ocean model which is subject to an externally imposed perturbative surface heat flux.
The passive tracer is initialised to the potential temperature in the control climate before the perturbation is imposed. Its surface flux is the heat flux from the atmosphere, not including the imposed perturbation, and is converted to a passive tracer increment as if it were being added to potential temperature. The passive tracer is transported within the ocean as if it were potential temperature. The passive tracer records redistributed heat, as described for the CMIP6 FAFMIP experiment (doi:10.5194/gmd-9-3993-2016), following earlier ideas. Potential temperature is the temperature a parcel of air or sea water would have if moved adiabatically to sea level pressure. + The quantity with standard name sea_water_redistributed_potential_temperature is a passive tracer in an ocean model which is subject to an externally imposed perturbative surface heat flux. The passive tracer is initialised to the potential temperature in the control climate before the perturbation is imposed. Its surface flux is the heat flux from the atmosphere, not including the imposed perturbation, and is converted to a passive tracer increment as if it were being added to potential temperature. The passive tracer is transported within the ocean as if it were potential temperature. The passive tracer records redistributed heat, as described for the CMIP6 FAFMIP experiment (doi:10.5194/gmd-9-3993-2016), following earlier ideas. Potential temperature is the temperature a parcel of air or sea water would have if moved adiabatically to sea level pressure. It is strongly recommended that a variable with this standard name should have a units_metadata attribute, with one of the values "on-scale" or "difference", whichever is appropriate for the data, because it is essential to know whether the temperature is on-scale (meaning relative to the origin of the scale indicated by the units) or refers to temperature differences (implying that the origin of the temperature scale is irrevelant), in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). @@ -17816,7 +20312,7 @@ totals are summed to obtain the index. 1e-3 - The salinity at the sea floor is that adjacent to the ocean bottom, which would be the deepest grid cell in an ocean model. Sea water salinity is the salt concentration of sea water, often on the Practical Salinity Scale of 1978. However, the unqualified term 'salinity' is generic and does not necessarily imply any particular method of calculation. The units of salinity are dimensionless and the units attribute should normally be given as 1e-3 or 0.001 i.e. parts per thousand. There are standard names for the more precisely defined salinity quantities sea_water_knudsen_salinity, S_K (used for salinity observations between 1901 and 1966), sea_water_cox_salinity, S_C (used for salinity observations between 1967 and 1977), sea_water_practical_salinity, S_P (used for salinity observations from 1978 to the present day), sea_water_absolute_salinity, S_A, sea_water_preformed_salinity, S_*, and sea_water_reference_salinity. Practical Salinity is reported on the Practical Salinity Scale of 1978 (PSS-78), and is usually based on the electrical conductivity of sea water in observations since the 1960s. 
Conversion of data between the observed scales follows S_P = (S_K - 0.03) * (1.80655 / 1.805) and S_P = S_C, however the accuracy of the latter is dependent on whether chlorinity or conductivity was used to determine the S_C value, with this inconsistency driving the development of PSS-78. The more precise standard names should be used where appropriate for both modelled and observed salinities. In particular, the use of sea_water_salinity to describe salinity observations made from 1978 onwards is now deprecated in favor of the term sea_water_practical_salinity which is the salinity quantity stored by national data centers for post-1978 observations. The only exception to this is where the observed salinities are definitely known not to be recorded on the Practical Salinity Scale. Practical salinity units are dimensionless. The unit "parts per thousand" was used for sea_water_knudsen_salinity and sea_water_cox_salinity. + The salinity at the sea floor is that adjacent to the ocean bottom, which would be the deepest grid cell in an ocean model and within the benthic boundary layer for measurements. Sea water salinity is the salt concentration of sea water, often on the Practical Salinity Scale of 1978. However, the unqualified term 'salinity' is generic and does not necessarily imply any particular method of calculation. The units of salinity are dimensionless and the units attribute should normally be given as 1e-3 or 0.001 i.e. parts per thousand. There are standard names for the more precisely defined salinity quantities sea_water_knudsen_salinity, S_K (used for salinity observations between 1901 and 1966), sea_water_cox_salinity, S_C (used for salinity observations between 1967 and 1977), sea_water_practical_salinity, S_P (used for salinity observations from 1978 to the present day), sea_water_absolute_salinity, S_A, sea_water_preformed_salinity, S_*, and sea_water_reference_salinity. Practical Salinity is reported on the Practical Salinity Scale of 1978 (PSS-78), and is usually based on the electrical conductivity of sea water in observations since the 1960s. Conversion of data between the observed scales follows S_P = (S_K - 0.03) * (1.80655 / 1.805) and S_P = S_C, however the accuracy of the latter is dependent on whether chlorinity or conductivity was used to determine the S_C value, with this inconsistency driving the development of PSS-78. The more precise standard names should be used where appropriate for both modelled and observed salinities. In particular, the use of sea_water_salinity to describe salinity observations made from 1978 onwards is now deprecated in favor of the term sea_water_practical_salinity which is the salinity quantity stored by national data centers for post-1978 observations. The only exception to this is where the observed salinities are definitely known not to be recorded on the Practical Salinity Scale. Practical salinity units are dimensionless. The unit "parts per thousand" was used for sea_water_knudsen_salinity and sea_water_cox_salinity. @@ -17837,7 +20333,7 @@ totals are summed to obtain the index. kg m-3 - Sigma-theta of sea water is the potential density (i.e. the density when moved adiabatically to a reference pressure) of water having the same temperature and salinity, minus 1000 kg m-3. Note that sea water sigma is not the same quantity as the dimensionless ocean sigma coordinate (see Appendix D of the CF convention), for which there is another standard name. + Sigma-theta of sea water is the potential density (i.e. 
the density when moved adiabatically to a reference pressure) of water having the same temperature and salinity, minus 1000 kg m-3. Note that sea water sigma is not the same quantity as the dimensionless ocean sigma coordinate (see Appendix D of the CF convention), for which there is another standard name. To specify the reference pressure to which the quantity applies, provide a scalar coordinate variable with standard name reference_pressure. @@ -17861,25 +20357,60 @@ totals are summed to obtain the index. Speed is the magnitude of velocity. - - K - 80 + + m s-1 + + + Speed is the magnitude of velocity. The speed at the sea floor is that adjacent to the ocean bottom, which would be the deepest grid cell in an ocean model and within the benthic boundary layer for measurements. + + + + m s-1 + + + Speed is the magnitude of velocity. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Ekman drift" is the movement of a layer of water (the Ekman layer) due to the combination of wind stress at the sea surface and the Coriolis effect. Ekman drift is to the right of the wind direction in the Northern Hemisphere and the left in the Southern Hemisphere. Reference: https://www.open.edu/openlearn/science-maths-technology/the-oceans/content-section-4.3. + + + + m s-1 + + + Speed is the magnitude of velocity. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Due to tides" means due to all astronomical gravity changes which manifest as tides. No distinction is made between different tidal components. + + + + s-1 + + + Speed is the magnitude of velocity. Sea water speed shear is the derivative of sea water speed with respect to depth. + + + + K + 80 to - Sea water temperature is the in situ temperature of the sea water. To specify the depth at which the temperature applies use a vertical coordinate variable or scalar coordinate variable. There are standard names for sea_surface_temperature, sea_surface_skin_temperature, sea_surface_subskin_temperature and sea_surface_foundation_temperature which can be used to describe data located at the specified surfaces. For observed data, depending on the period during which the observation was made, the measured in situ temperature was recorded against standard "scales". These historical scales include the International Practical Temperature Scale of 1948 (IPTS-48; 1948-1967), the International Practical Temperature Scale of 1968 (IPTS-68, Barber, 1969; 1968-1989) and the International Temperature Scale of 1990 (ITS-90, Saunders 1990; 1990 onwards). Conversion of data between these scales follows t68 = t48 - (4.4 x 10e-6) * t48(100 - t - 48); t90 = 0.99976 * t68. Observations made prior to 1948 (IPTS-48) have not been documented and therefore a conversion cannot be certain. Differences between t90 and t68 can be up to 0.01 at temperatures of 40 C and above; differences of 0.002-0.007 occur across the standard range of ocean temperatures (-10 - 30 C). The International Equation of State of Seawater 1980 (EOS-80, UNESCO, 1981) and the Practical Salinity Scale (PSS-78) were both based on IPTS-68, while the Thermodynamic Equation of Seawater 2010 (TEOS-10) is based on ITS-90. 
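The sea_water_salinity entries above quote explicit conversions between the historical salinity scales. A small sketch of those two conversions; the function names are illustrative.

```python
# Sketch of the salinity scale conversions quoted in the entries above.

def knudsen_to_practical(s_k):
    """sea_water_knudsen_salinity (1901-1966) to sea_water_practical_salinity:
    S_P = (S_K - 0.03) * (1.80655 / 1.805)."""
    return (s_k - 0.03) * (1.80655 / 1.805)


def cox_to_practical(s_c):
    """sea_water_cox_salinity (1967-1977) to sea_water_practical_salinity:
    S_P = S_C; the entry notes the accuracy depends on whether chlorinity
    or conductivity was used to determine S_C."""
    return s_c


print(round(knudsen_to_practical(35.03), 3))  # ~35.03 on PSS-78
```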
References: Barber, 1969, doi: 10.1088/0026-1394/5/2/001; UNESCO, 1981; Saunders, 1990, WOCE Newsletter, 10, September 1990. + Sea water temperature is the in situ temperature of the sea water. To specify the depth at which the temperature applies use a vertical coordinate variable or scalar coordinate variable. There are standard names for sea_surface_temperature, sea_surface_skin_temperature, sea_surface_subskin_temperature and sea_surface_foundation_temperature which can be used to describe data located at the specified surfaces. For observed data, depending on the period during which the observation was made, the measured in situ temperature was recorded against standard "scales". These historical scales include the International Practical Temperature Scale of 1948 (IPTS-48; 1948-1967), the International Practical Temperature Scale of 1968 (IPTS-68, Barber, 1969; 1968-1989) and the International Temperature Scale of 1990 (ITS-90, Saunders 1990; 1990 onwards). Conversion of data between these scales follows t68 = t48 - (4.4 x 10^-6) * t48 * (100 - t48); t90 = 0.99976 * t68. Observations made prior to 1948 (IPTS-48) have not been documented and therefore a conversion cannot be certain. Differences between t90 and t68 can be up to 0.01 at temperatures of 40 C and above; differences of 0.002-0.007 occur across the standard range of ocean temperatures (-10 - 30 C). The International Equation of State of Seawater 1980 (EOS-80, UNESCO, 1981) and the Practical Salinity Scale (PSS-78) were both based on IPTS-68, while the Thermodynamic Equation of Seawater 2010 (TEOS-10) is based on ITS-90. References: Barber, 1969, doi: 10.1088/0026-1394/5/2/001; UNESCO, 1981; Saunders, 1990, WOCE Newsletter, 10, September 1990. It is strongly recommended that a variable with this standard name should have a units_metadata attribute, with one of the values "on-scale" or "difference", whichever is appropriate for the data, because it is essential to know whether the temperature is on-scale (meaning relative to the origin of the scale indicated by the units) or refers to temperature differences (implying that the origin of the temperature scale is irrelevant), in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). - + K - Sea water temperature is the in situ temperature of the sea water. + The term "anomaly" means difference from climatology. Sea water temperature is the in situ temperature of the sea water. To specify the depth at which the temperature anomaly applies, use a vertical coordinate variable or scalar coordinate variable. It is strongly recommended that a variable with this standard name should have the attribute units_metadata="temperature: difference", meaning that it refers to temperature differences and implying that the origin of the temperature scale is irrelevant, because it is essential to know whether a temperature is on-scale or a difference in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). - - degree - 47 + + K + - The phrase "to_direction" is used in the construction X_to_direction and indicates the direction towards which the velocity vector of X is headed. The direction is a bearing in the usual geographical sense, measured positive clockwise from due north. + Sea water temperature is the in situ temperature of the sea water. 
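The sea_water_temperature entry above gives the conversions between historical temperature scales: t68 = t48 - (4.4 x 10^-6) * t48 * (100 - t48) and t90 = 0.99976 * t68. A sketch, with names illustrative:

```python
# Sketch of the historical temperature scale conversions quoted above
# (IPTS-48 -> IPTS-68 -> ITS-90).

def t48_to_t68(t48):
    """IPTS-48 to IPTS-68: t68 = t48 - (4.4 x 10^-6) * t48 * (100 - t48)."""
    return t48 - 4.4e-6 * t48 * (100.0 - t48)


def t68_to_t90(t68):
    """IPTS-68 to ITS-90: t90 = 0.99976 * t68."""
    return 0.99976 * t68


print(round(t68_to_t90(t48_to_t68(15.0)), 4))  # 14.9908, a ~9 mK shift
```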
The temperature at the sea floor is that adjacent to the ocean bottom, which would be the deepest grid cell in an ocean model and within the benthic boundary layer for measurements. It is strongly recommended that a variable with this standard name should have a units_metadata attribute, with one of the values "on-scale" or "difference", whichever is appropriate for the data, because it is essential to know whether the temperature is on-scale (meaning relative to the origin of the scale indicated by the units) or refers to temperature differences (implying that the origin of the temperature scale is irrevelant), in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). + + + + K + + + Sea water temperature is the in situ temperature of the sea water. It is strongly recommended that a variable with this standard name should have the attribute units_metadata="temperature: difference", meaning that it refers to temperature differences and implying that the origin of the temperature scale is irrelevant, because it is essential to know whether a temperature is on-scale or a difference in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). @@ -17896,6 +20427,41 @@ totals are summed to obtain the index. Turbidity is a dimensionless quantity which is expressed in NTU (Nephelometric Turbidity Units). Turbidity expressed in NTU is the proportion of white light scattered back to a transceiver by the particulate load in a body of water, represented on an arbitrary scale referenced against measurements made in the laboratory on aqueous suspensions of formazine beads. Sea water turbidity may also be measured by the quantity with standard name secchi_depth_of_sea_water. + + degree + + + A velocity is a vector quantity. The phrase "from_direction" is used in the construction X_from_direction and indicates the direction from which the velocity vector of X is coming. The direction is a bearing in the usual geographical sense, measured positive clockwise from due north. + + + + degree + 47 + + A velocity is a vector quantity. The phrase "to_direction" is used in the construction X_to_direction and indicates the direction towards which the velocity vector of X is headed. The direction is a bearing in the usual geographical sense, measured positive clockwise from due north. + + + + degree + + + A velocity is a vector quantity. The phrase "to_direction" is used in the construction X_to_direction and indicates the direction towards which the velocity vector of X is headed. The direction is a bearing in the usual geographical sense, measured positive clockwise from due north. The direction at the sea floor is that adjacent to the ocean bottom, which would be the deepest grid cell in an ocean model and within the benthic boundary layer for measurements. + + + + degree + + + A velocity is a vector quantity. The phrase "to_direction" is used in the construction X_to_direction and indicates the direction towards which the velocity vector of X is headed. The direction is a bearing in the usual geographical sense, measured positive clockwise from due north. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. 
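The direction entries above define to_direction and from_direction as bearings measured positive clockwise from due north, 180 degrees apart. A sketch deriving both from eastward and northward velocity components; the component-based input is an assumption for illustration, since the entries define only the bearings themselves.

```python
# Sketch: velocity components -> the bearings described above.
import math


def to_direction(eastward, northward):
    """Bearing (degrees clockwise from due north) the vector is headed toward."""
    return math.degrees(math.atan2(eastward, northward)) % 360.0


def from_direction(eastward, northward):
    """Bearing the vector is coming from: the opposite bearing."""
    return (to_direction(eastward, northward) + 180.0) % 360.0


print(to_direction(1.0, 0.0), from_direction(1.0, 0.0))  # 90.0 270.0
```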
"Ekman drift" is the movement of a layer of water (the Ekman layer) due to the combination of wind stress at the sea surface and the Coriolis effect. Ekman drift is to the right of the wind direction in the Northern Hemisphere and the left in the Southern Hemisphere. Reference: https://www.open.edu/openlearn/science-maths-technology/the-oceans/content-section-4.3. + + + + degree + + + A velocity is a vector quantity. The phrase "to_direction" is used in the construction X_to_direction and indicates the direction towards which the velocity vector of X is headed. The direction is a bearing in the usual geographical sense, measured positive clockwise from due north. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Due to tides" means due to all astronomical gravity changes which manifest as tides. No distinction is made between different tidal components. + + m3 @@ -17970,8 +20536,7 @@ totals are summed to obtain the index. - A variable with the standard name of sensor_band_identifier contains strings which give the alphanumeric identifier of a sensor band. These strings have not yet been standardised. - + A variable with the standard name of sensor_band_identifier contains strings which give the alphanumeric identifier of a sensor band. These strings have not yet been standardised. @@ -17995,6 +20560,20 @@ totals are summed to obtain the index. "Time fraction" means a fraction of a time interval. The interval in question must be specified by the values or bounds of the time coordinate variable associated with the data. "X_time_fraction" means the fraction of the time interval during which X occurs. + + m + + + The phrase "cloud_base" refers to the base of the lowest cloud. Altitude is the (geometric) height above the geoid, which is the reference geopotential surface. The geoid is similar to mean sea level. Shallow convective cloud is nonprecipitating cumulus cloud with a cloud top below 3000m above the surface produced by the convection schemes in an atmosphere model. Some atmosphere models differentiate between shallow and deep convection. + + + + m + + + The phrase "cloud_top" refers to the top of the highest cloud. Altitude is the (geometric) height above the geoid, which is the reference geopotential surface. The geoid is similar to mean sea level. Shallow convective cloud is nonprecipitating cumulus cloud with a cloud top below 3000m above the surface produced by the convection schemes in an atmosphere model. Some atmosphere models differentiate between shallow and deep convection. + + kg m-2 s-1 @@ -18002,6 +20581,34 @@ totals are summed to obtain the index. Convective precipitation is that produced by the convection schemes in an atmosphere model. Some atmosphere models differentiate between shallow and deep convection. "Precipitation" in the earth's atmosphere means precipitation of water in all phases. In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. + + Pa + + + Shear strength is the amount of force applied to a normal plane required to bring a frozen soil to failure along a tangential plane. Shear strength depends on the angle of friction and cohesion of the soil. + + + + Pa + + + Shear strength is the amount of force applied to a normal plane required to bring the soil to failure along a tangential plane. 
Shear strength depends on the angle of friction and cohesion of the soil. + + + + 1 + + + The magnitude of an acoustic signal emitted by the instrument toward a reflecting surface and received again by the instrument. + + + + kg s-1 + + + The amount of silicate mass transported in the river channels from land into the ocean. This quantity can be provided at a certain location within the river network and floodplain (over land) or at the river mouth (over ocean) where the river enters the ocean. "River" refers to water in the fluvial system (stream and floodplain). + + 1 @@ -18009,6 +20616,62 @@ totals are summed to obtain the index. "Single scattering albedo" is the fraction of radiation in an incident light beam scattered by the particles of an aerosol reference volume for a given wavelength. It is the ratio of the scattering and the extinction coefficients of the aerosol particles in the reference volume. A coordinate variable with a standard name of radiation_wavelength or radiation_frequency should be included to specify either the wavelength or frequency. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. "Ambient_aerosol" means that the aerosol is measured or modelled at the ambient state of pressure, temperature and relative humidity that exists in its immediate environment. "Ambient aerosol particles" are aerosol particles that have taken up ambient water through hygroscopic growth. The extent of hygroscopic growth depends on the relative humidity and the composition of the particles. To specify the relative humidity and temperature at which the quantity described by the standard name applies, provide scalar coordinate variables with standard names of "relative_humidity" and "air_temperature". The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. + + 1 + + + Singular values of the matrix representing the remote sensing averaging kernels (Weber 2019; Schneider et al., 2022) of the methane mole fractions obtained by a remote sensing observation (changes of methane in the retrieved atmosphere relative to the changes of methane in the true atmosphere, Rodgers 2000). + + + + 1 + + + Singular values of the matrix representing the remote sensing averaging kernels (Weber 2019; Schneider et al., 2022) of the methane mole fractions obtained by a remote sensing observation (changes of methane in the retrieved atmosphere relative to the changes of methane in the true atmosphere, Rodgers 2000). + + + + kg m-2 s-1 + + + In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. "Sinking" is the gravitational settling of particulate matter suspended in a liquid. A sinking flux is positive downwards and is calculated relative to the movement of the surrounding fluid. Particulate means suspended solids of all sizes. Biogenic silica is a hydrated form of silica (silicon dioxide) with the chemical formula SiO2.nH2O sometimes referred to as opaline silica or opal. It is created by biological processes and in sea water it is predominantly the skeletal material of diatoms. + + + + kg m-2 s-1 + + + In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. "Sinking" is the gravitational settling of particulate matter suspended in a liquid. 
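The shear strength entries above state only that shear strength depends on the cohesion and the angle of friction of the soil. The classical Mohr-Coulomb failure criterion is one textbook way of expressing that dependence; the formula is an assumption here, not part of the table.

```python
# Sketch of the Mohr-Coulomb criterion: tau_f = c + sigma_n * tan(phi).
import math


def shear_strength(cohesion_pa, normal_stress_pa, friction_angle_deg):
    """Shear strength (Pa) from cohesion, normal stress and friction angle."""
    return cohesion_pa + normal_stress_pa * math.tan(
        math.radians(friction_angle_deg)
    )


# c = 10 kPa, sigma_n = 100 kPa, phi = 30 degrees -> ~67.7 kPa
print(round(shear_strength(10e3, 100e3, 30.0)))
```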
A sinking flux is positive downwards and is calculated relative to the movement of the surrounding fluid. Particulate means suspended solids of all sizes. + + + + kg m-2 s-1 + + + In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. "Sinking" is the gravitational settling of particulate matter suspended in a liquid. A sinking flux is positive downwards and is calculated relative to the movement of the surrounding fluid. Particulate means suspended solids of all sizes. Particulate inorganic carbon is carbon bound in molecules ionically that may be liberated from the particles as carbon dioxide by acidification. + + + + kg m-2 s-1 + + + In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. "Sinking" is the gravitational settling of particulate matter suspended in a liquid. A sinking flux is positive downwards and is calculated relative to the movement of the surrounding fluid. + + + + kg m-2 s-1 + + + In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. "Sinking" is the gravitational settling of particulate matter suspended in a liquid. A sinking flux is positive downwards and is calculated relative to the movement of the surrounding fluid. Particulate means suspended solids of all sizes. + + + + kg m-2 s-1 + + + In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. "Sinking" is the gravitational settling of particulate matter suspended in a liquid. A sinking flux is positive downwards and is calculated relative to the movement of the surrounding fluid. Particulate means suspended solids of all sizes. + + mol m-2 s-1 @@ -18041,7 +20704,7 @@ totals are summed to obtain the index. mol m-2 s-1 - In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. 'Sinking' is the gravitational settling of particulate matter suspended in a liquid. A sinking flux is positive downwards and is calculated relative to the movement of the surrounding fluid. + In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. "Sinking" is the gravitational settling of particulate matter suspended in a liquid. A sinking flux is positive downwards and is calculated relative to the movement of the surrounding fluid. "Particulate organic nitrogen" means the sum of all organic nitrogen compounds, which are solid or which are bound to solid particles. "Organic nitrogen", when measured, always refers to all nitrogen incorporated in carbon compounds in the sample. Models may use the term to refer to nitrogen contained in specific groups of organic compounds in which case the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute. @@ -18065,11 +20728,11 @@ totals are summed to obtain the index. "Content" indicates a quantity per unit area. The "soil content" of a quantity refers to the vertical integral from the surface down to the bottom of the soil model. For the content between specified levels in the soil, standard names including content_of_soil_layer are used. Soil carbon is returned to the atmosphere as the organic matter decays. 
The decay process takes varying amounts of time depending on the composition of the organic matter, the temperature and the availability of moisture. A carbon "soil pool" means the carbon contained in organic matter which has a characteristic period over which it decays and releases carbon into the atmosphere. "Slow soil pool" refers to the decay of organic matter in soil with a characteristic period of more than a hundred years under reference climate conditions of a temperature of 20 degrees Celsius and no water limitations. - - kg m-3 + + 1 - + "Area fraction" is the fraction of a grid cell's horizontal area that has some characteristic of interest. It is evaluated as the area of interest divided by the grid cell area. It may be expressed as a fraction, a percentage, or any other dimensionless representation of a fraction. Snow "viewable from above" refers to the snow on objects or the ground as viewed from above, which excludes, for example, falling snow flakes and snow obscured by a canopy, vegetative cover, or other features resting on the surface. @@ -18107,6 +20770,13 @@ totals are summed to obtain the index. Soil albedo is the albedo of the soil surface assuming no snow. Albedo is the ratio of outgoing to incoming shortwave irradiance, where 'shortwave irradiance' means that both the incoming and outgoing radiation are integrated across the solar spectrum. + + kg m-3 + + + The density of the soil in its natural condition. Also known as bulk density. The density of a substance is its mass per unit volume. + + kg m-2 @@ -18121,6 +20791,13 @@ totals are summed to obtain the index. Hydraulic conductivity is the constant k in Darcy's Law q=-k grad h for fluid flow q (volume transport per unit area i.e. velocity) through a porous medium, where h is the hydraulic head (pressure expressed as an equivalent depth of water). + + kg m-2 + + + "Content" indicates a quantity per unit area. The "soil content" of a quantity refers to the vertical integral from the surface down to the bottom of the soil model. For the content between specified levels in the soil, standard names including "content_of_soil_layer" are used. + + kg m-2 @@ -18209,30 +20886,44 @@ totals are summed to obtain the index. K 85 - Soil temperature is the bulk temperature of the soil, not the surface (skin) temperature. + Soil temperature is the bulk temperature of the soil, not the surface (skin) temperature. "Soil" means the near-surface layer where plants sink their roots. For subsurface temperatures that extend beneath the soil layer or in areas where there is no surface soil layer, the standard name temperature_in_ground should be used. It is strongly recommended that a variable with this standard name should have a units_metadata attribute, with one of the values "on-scale" or "difference", whichever is appropriate for the data, because it is essential to know whether the temperature is on-scale (meaning relative to the origin of the scale indicated by the units) or refers to temperature differences (implying that the origin of the temperature scale is irrevelant), in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). J kg-1 K-1 - Thermal capacity, or heat capacity, is the amount of heat energy required to increase the temperature of 1 kg of material by 1 K. It is a property of the material. + Thermal capacity, or heat capacity, is the amount of heat energy required to increase the temperature of 1 kg of material by 1 K. 
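The soil hydraulic conductivity entry above defines k through Darcy's Law, q = -k grad h, with q a volume transport per unit area (a velocity) and h the hydraulic head. A one-dimensional sketch; the two-level discretisation and the names are illustrative.

```python
# Sketch of Darcy's Law in one (vertical) dimension: q = -k * dh/dz.

def darcy_flux(k, h1, h2, z1, z2):
    """Volume transport per unit area (m s-1) between two levels."""
    return -k * (h2 - h1) / (z2 - z1)


# Head falls from 0.1 m at z = 0 to 0 m at z = 1 m; k = 1e-5 m s-1.
print(darcy_flux(1e-5, 0.1, 0.0, 0.0, 1.0))  # 1e-06, toward the lower head
```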
It is a property of the material. It is strongly recommended that a variable with this standard name should have the attribute units_metadata="temperature: difference", meaning that it refers to temperature differences and implying that the origin of the temperature scale is irrelevant, because it is essential to know whether a temperature is on-scale or a difference in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). W m-1 K-1 - Thermal conductivity is the constant k in the formula q = -k grad T where q is the heat transfer per unit time per unit area of a surface normal to the direction of transfer and grad T is the temperature gradient. Thermal conductivity is a property of the material. + Thermal conductivity is the constant k in the formula q = -k grad T where q is the heat transfer per unit time per unit area of a surface normal to the direction of transfer and grad T is the temperature gradient. Thermal conductivity is a property of the material. It is strongly recommended that a variable with this standard name should have the attribute units_metadata="temperature: difference", meaning that it refers to temperature differences and implying that the origin of the temperature scale is irrelevant, because it is essential to know whether a temperature is on-scale or a difference in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). - 1 + A variable with the standard name of soil_type contains strings which indicate the character of the soil e.g. clay. These strings have not yet been standardised. Alternatively, the data variable may contain integers which can be translated to strings using flag_values and flag_meanings attributes. + + 1 + + + soil_water_ph is the measure of acidity of soil moisture, defined as the negative logarithm of the concentration of dissolved hydrogen ions in soil water. + + + + 1e-3 + + + The quantity with standard name soil_water_salinity is the salt content of soil water, often on the Practical Salinity Scale of 1978. However, the unqualified term 'salinity' is generic and does not necessarily imply any particular method of calculation. The units of salinity are dimensionless and normally given as 1e-3 or 0.001 i.e. parts per thousand. + + degree @@ -18300,7 +20991,7 @@ totals are summed to obtain the index. kg m-2 - "Content" indicates a quantity per unit area. The surface called "surface" means the lower boundary of the atmosphere. + "Content" indicates a quantity per unit area. Surface snow refers to the snow on the solid ground or on surface ice cover, but excludes, for example, falling snowflakes and snow on plants. @@ -18380,6 +21071,13 @@ totals are summed to obtain the index. "specific" means per unit mass. Dry energy is the sum of dry static energy and kinetic energy. Dry static energy is the sum of enthalpy and potential energy (itself the sum of gravitational and centripetal potential energy). Enthalpy can be written either as (1) CpT, where Cp is heat capacity at constant pressure, T is absolute temperature, or (2) U+pV, where U is internal energy, p is pressure and V is volume. + + J kg-1 + + + The specific_enthalpy_of_air is the enthalpy of air per unit mass, which can be computed for an air sample as the sum of the enthalpy of the dry air and the enthalpy of the water vapor in that air, divided by the mass of dry air. + + J kg-1 @@ -18387,11 +21085,18 @@ totals are summed to obtain the index. 
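Several entries in this batch strongly recommend the units_metadata attribute, with "temperature: difference" for quantities such as heat capacities and anomalies. A sketch of attaching it with the netCDF4-python library; the file name, dimension and data values are illustrative only.

```python
# Sketch: attaching the recommended units_metadata attribute (netCDF4-python).
from netCDF4 import Dataset

with Dataset("example.nc", "w") as ds:
    ds.createDimension("depth", 3)
    t = ds.createVariable("temp", "f4", ("depth",))
    t.standard_name = "sea_water_temperature"
    t.units = "degree_C"
    # "temperature: on-scale" -> values are relative to the origin of the
    # Celsius scale; anomalies and heat capacities would instead carry
    # units_metadata = "temperature: difference".
    t.units_metadata = "temperature: on-scale"
    t[:] = [15.0, 10.2, 4.1]
```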
"specific" means per unit mass. Potential energy is the sum of the gravitational potential energy relative to the geoid and the centripetal potential energy. (The geopotential is the specific potential energy.) + + J kg-1 K-1 + + + Thermal capacity, or heat capacity, is the amount of heat energy required to increase the temperature of 1 kg of material by 1 K. It is a property of the material. It is strongly recommended that a variable with this standard name should have the attribute units_metadata="temperature: difference", meaning that it refers to temperature differences and implying that the origin of the temperature scale is irrelevant, because it is essential to know whether a temperature is on-scale or a difference in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). + + J kg-1 K-1 - The specific heat capacity of sea water, Cp(ocean), is used in ocean models to convert between model prognostic temperature (potential or conservative temperature) and model heat content. + The specific heat capacity of sea water, Cp(ocean), is used in ocean models to convert between model prognostic temperature (potential or conservative temperature) and model heat content. It is strongly recommended that a variable with this standard name should have the attribute units_metadata="temperature: difference", meaning that it refers to temperature differences and implying that the origin of the temperature scale is irrelevant, because it is essential to know whether a temperature is on-scale or a difference in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). @@ -18422,6 +21127,13 @@ totals are summed to obtain the index. "Specific" means per unit mass. "Turbulent kinetic energy" is the kinetic energy of chaotic fluctuations of the fluid flow. The dissipation of kinetic energy arises in ocean models as a result of the viscosity of sea water. + + m2 s-2 + + + Specific means per unit mass. "Turbulent kinetic energy" is the kinetic energy of all eddy-induced motion that is not resolved on the grid scale of the model. + + m2 s-2 @@ -18429,6 +21141,13 @@ totals are summed to obtain the index. "Specific" means per unit mass. "Turbulent kinetic energy" is the kinetic energy of chaotic fluctuations of the fluid flow. + + Hz + + + The quantity with standard name spectral_width_of_radio_wave_in_air_scattered_by_air is the frequency width of the signal received by an instrument such as a radar or lidar. The signal returned to the instrument is the sum of all scattering from a given volume of air regardless of mechanism (examples are scattering by aerosols, hydrometeors and refractive index irregularities, or whatever else the instrument detects). + + m s-1 @@ -18447,35 +21166,42 @@ totals are summed to obtain the index. day - Air temperature is the bulk temperature of the air, not the surface (skin) temperature. A spell is the number of consecutive days on which the condition X_below|above_threshold is satisfied. A variable whose standard name has the form spell_length_of_days_with_X_below|above_threshold must have a coordinate variable or scalar coordinate variable with the a standard name of X to supply the threshold(s). It must have a climatological time variable, and a cell_method entry for within days which describes the processing of quantity X before the threshold is applied. 
A spell_length_of_days is an intensive quantity in time, and the cell_methods entry for over days can be any of the methods listed in Appendix E appropriate for intensive quantities e.g. "maximum", "minimum" or "mean". + Air temperature is the bulk temperature of the air, not the surface (skin) temperature. A spell is the number of consecutive days on which the condition X_below|above_threshold is satisfied. A variable whose standard name has the form spell_length_of_days_with_X_below|above_threshold must have a coordinate variable or scalar coordinate variable with the standard name of X to supply the threshold(s). It must have a climatological time variable, and a cell_method entry for within days which describes the processing of quantity X before the threshold is applied. A spell_length_of_days is an intensive quantity in time, and the cell_methods entry for over days can be any of the methods listed in Appendix E appropriate for intensive quantities e.g. "maximum", "minimum" or "mean". day - Air temperature is the bulk temperature of the air, not the surface (skin) temperature. A spell is the number of consecutive days on which the condition X_below|above_threshold is satisfied. A variable whose standard name has the form spell_length_of_days_with_X_below|above_threshold must have a coordinate variable or scalar coordinate variable with the a standard name of X to supply the threshold(s). It must have a climatological time variable, and a cell_method entry for within days which describes the processing of quantity X before the threshold is applied. A spell_length_of_days is an intensive quantity in time, and the cell_methods entry for over days can be any of the methods listed in Appendix E appropriate for intensive quantities e.g. "maximum", "minimum" or "mean". + Air temperature is the bulk temperature of the air, not the surface (skin) temperature. A spell is the number of consecutive days on which the condition X_below|above_threshold is satisfied. A variable whose standard name has the form spell_length_of_days_with_X_below|above_threshold must have a coordinate variable or scalar coordinate variable with the standard name of X to supply the threshold(s). It must have a climatological time variable, and a cell_method entry for within days which describes the processing of quantity X before the threshold is applied. A spell_length_of_days is an intensive quantity in time, and the cell_methods entry for over days can be any of the methods listed in Appendix E appropriate for intensive quantities e.g. "maximum", "minimum" or "mean". day - "Amount" means mass per unit area. "Precipitation" in the earth's atmosphere means precipitation of water in all phases. The construction lwe_thickness_of_X_amount or _content means the vertical extent of a layer of liquid water having the same mass per unit area. The abbreviation "lwe" means liquid water equivalent. A spell is the number of consecutive days on which the condition X_below|above_threshold is satisfied. A variable whose standard name has the form spell_length_of_days_with_X_below|above_threshold must have a coordinate variable or scalar coordinate variable with the a standard name of X to supply the threshold(s). It must have a climatological time variable, and a cell_method entry for within days which describes the processing of quantity X before the threshold is applied. 
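The spell-length entries above impose three metadata requirements: a coordinate or scalar coordinate variable carrying the threshold, a climatological time variable, and a cell_methods entry combining "within days" and "over days". A netCDF4-python sketch of one way to satisfy them; the file layout and names are illustrative, not normative.

```python
# Sketch of the metadata required by the spell-length entries above.
from netCDF4 import Dataset

with Dataset("spell.nc", "w") as ds:
    ds.createDimension("time", None)
    ds.createDimension("nv", 2)

    time = ds.createVariable("time", "f8", ("time",))
    time.units = "days since 2000-01-01"
    time.climatology = "climatology_bounds"  # climatological time variable
    ds.createVariable("climatology_bounds", "f8", ("time", "nv"))

    thresh = ds.createVariable("tmax_threshold", "f4")  # scalar coordinate
    thresh.standard_name = "air_temperature"
    thresh.units = "degC"
    thresh.assignValue(30.0)

    spell = ds.createVariable("warm_spell", "f4", ("time",))
    spell.standard_name = (
        "spell_length_of_days_with_air_temperature_above_threshold"
    )
    spell.units = "day"
    spell.coordinates = "tmax_threshold"
    # "within days" processes X; "over days" must suit an intensive quantity.
    spell.cell_methods = "time: maximum within days time: maximum over days"
```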
A spell_length_of_days is an intensive quantity in time, and the cell_methods entry for over days can be any of the methods listed in Appendix E appropriate for intensive quantities e.g. "maximum", "minimum" or "mean". + "Amount" means mass per unit area. "Precipitation" in the earth's atmosphere means precipitation of water in all phases. The construction lwe_thickness_of_X_amount or _content means the vertical extent of a layer of liquid water having the same mass per unit area. The abbreviation "lwe" means liquid water equivalent. A spell is the number of consecutive days on which the condition X_below|above_threshold is satisfied. A variable whose standard name has the form spell_length_of_days_with_X_below|above_threshold must have a coordinate variable or scalar coordinate variable with the standard name of X to supply the threshold(s). It must have a climatological time variable, and a cell_method entry for within days which describes the processing of quantity X before the threshold is applied. A spell_length_of_days is an intensive quantity in time, and the cell_methods entry for over days can be any of the methods listed in Appendix E appropriate for intensive quantities e.g. "maximum", "minimum" or "mean". day - "Amount" means mass per unit area. "Precipitation" in the earth's atmosphere means precipitation of water in all phases.The construction lwe_thickness_of_X_amount or _content means the vertical extent of a layer of liquid water having the same mass per unit area. The abbreviation "lwe" means liquid water equivalent. A spell is the number of consecutive days on which the condition X_below|above_threshold is satisfied. A variable whose standard name has the form spell_length_of_days_with_X_below|above_threshold must have a coordinate variable or scalar coordinate variable with the a standard name of X to supply the threshold(s). It must have a climatological time variable, and a cell_method entry for within days which describes the processing of quantity X before the threshold is applied. A spell_length_of_days is an intensive quantity in time, and the cell_methods entry for over days can be any of the methods listed in Appendix E appropriate for intensive quantities e.g. "maximum", "minimum" or "mean". + "Amount" means mass per unit area. "Precipitation" in the earth's atmosphere means precipitation of water in all phases. The construction lwe_thickness_of_X_amount or _content means the vertical extent of a layer of liquid water having the same mass per unit area. The abbreviation "lwe" means liquid water equivalent. A spell is the number of consecutive days on which the condition X_below|above_threshold is satisfied. A variable whose standard name has the form spell_length_of_days_with_X_below|above_threshold must have a coordinate variable or scalar coordinate variable with the standard name of X to supply the threshold(s). It must have a climatological time variable, and a cell_method entry for within days which describes the processing of quantity X before the threshold is applied. A spell_length_of_days is an intensive quantity in time, and the cell_methods entry for over days can be any of the methods listed in Appendix E appropriate for intensive quantities e.g. "maximum", "minimum" or "mean". + + + + 1 + + + A quality flag that reports the result of the Spike test, which checks that the difference between two points in a series of values is within reasonable bounds. 
The linkage between the data variable and this variable is achieved using the ancillary_variables attribute. There are standard names for other specific quality tests which take the form of X_quality_flag. Quality information that does not match any of the specific quantities should be given the more general standard name of quality_flag. K2 mptta - "square_of_X" means X*X. Air temperature is the bulk temperature of the air, not the surface (skin) temperature. + The phrase "square_of_X" means X*X. Air temperature is the bulk temperature of the air, not the surface (skin) temperature. It is strongly recommended that a variable with this standard name should have a units_metadata attribute, with one of the values "on-scale" or "difference", whichever is appropriate for the data, because it is essential to know whether the temperature is on-scale (meaning relative to the origin of the scale indicated by the units) or refers to temperature differences (implying that the origin of the temperature scale is irrevelant), in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). @@ -18545,7 +21271,7 @@ totals are summed to obtain the index. K2 - Sea surface temperature is usually abbreviated as "SST". It is the temperature of sea water near the surface (including the part under sea-ice, if any), and not the skin temperature, whose standard name is surface_temperature. For the temperature of sea water at a particular depth or layer, a data variable of sea_water_temperature with a vertical coordinate axis should be used. "square_of_X" means X*X. + Sea surface temperature is usually abbreviated as "SST". It is the temperature of sea water near the surface (including the part under sea-ice, if any), and not the skin temperature, whose standard name is surface_temperature. For the temperature of sea water at a particular depth or layer, a data variable of sea_water_temperature with a vertical coordinate axis should be used. "square_of_X" means X*X. It is strongly recommended that a variable with this standard name should have a units_metadata attribute, with one of the values "on-scale" or "difference", whichever is appropriate for the data, because it is essential to know whether the temperature is on-scale (meaning relative to the origin of the scale indicated by the units) or refers to temperature differences (implying that the origin of the temperature scale is irrevelant), in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). @@ -18562,11 +21288,18 @@ totals are summed to obtain the index. "Upward" indicates a vector component which is positive when directed upward (negative downward). Ocean transport means transport by all processes, both sea water and sea ice. "square_of_X" means X*X. + + K + + + In thermodynamics and fluid mechanics, stagnation temperature is the temperature at a stagnation point in a fluid flow. At a stagnation point the speed of the fluid is zero and all of the kinetic energy has been converted to internal energy and is added to the local static enthalpy. In both compressible and incompressible fluid flow, the stagnation temperature is equal to the total temperature at all points on the streamline leading to the stagnation point. In aviation, stagnation temperature is known as total air temperature and is measured by a temperature probe mounted on the surface of the aircraft. 
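The stagnation temperature entry above describes recovering static air temperature from total air temperature. The usual compressible-flow relation, T_total = T_static * (1 + (gamma - 1)/2 * M^2), is a textbook assumption rather than something the entry quotes, and real air-data computers also apply a probe recovery factor.

```python
# Sketch: static air temperature from total (stagnation) air temperature,
# assuming the textbook relation and gamma = 1.4 for dry air.

GAMMA = 1.4  # ratio of specific heats, an assumed constant


def static_air_temperature(total_temp_k, mach):
    """T_static = T_total / (1 + (gamma - 1)/2 * M**2), in kelvin."""
    return total_temp_k / (1.0 + 0.5 * (GAMMA - 1.0) * mach**2)


print(round(static_air_temperature(260.0, 0.8), 1))  # ~230.5 K at Mach 0.8
```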
The probe is designed to bring the air to rest relative to the aircraft. As the air is brought to rest, kinetic energy is converted to internal energy. The air is compressed and experiences an adiabatic increase in temperature. Therefore, total air temperature is higher than the static (or ambient) air temperature. Total air temperature is an essential input to an air data computer in order to enable computation of static air temperature and hence true airspeed. It is strongly recommended that a variable with this standard name should have a units_metadata attribute, with one of the values "on-scale" or "difference", whichever is appropriate for the data, because it is essential to know whether the temperature is on-scale (meaning relative to the origin of the scale indicated by the units) or refers to temperature differences (implying that the origin of the temperature scale is irrevelant), in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). + + 1 - A variable with the standard name of status_flag contains an indication of quality or other status of another data variable. The linkage between the data variable and the variable with the standard_name of status_flag is achieved using the ancillary_variables attribute. + A variable with the standard name of status_flag contains an indication of quality or other status of another data variable. This may include the status of the instrument producing the data as well as data quality information. The linkage between the data variable and the variable with the standard_name of status_flag is achieved using the ancillary_variables attribute. A variable which contains purely quality information may use the standard name of quality_flag to provide an assessed quality of the corresponding data. @@ -18597,18 +21330,25 @@ totals are summed to obtain the index. "Sea surface height" is a time-varying quantity. The steric change in sea surface height is the change in height that a water column of standard temperature zero degrees Celsius and practical salinity S=35.0 would undergo when its temperature and salinity are changed to the observed values. The sum of the quantities with standard names thermosteric_change_in_sea_surface_height and halosteric_change_in_sea_surface_height is the total steric change in the water column height, which has the standard name of steric_change_in_sea_surface_height. The sum of the quantities with standard names sea_water_mass_per_unit_area_expressed_as_thickness and steric_change_in_sea_surface_height is the total thickness of the sea water column. + + m s-1 + + + Storm motion speed is defined as a two dimensional velocity vector, with no vertical component. (Vertical motion in the atmosphere has the standard name upward_air_velocity.) It is defined as the average speed of a supercell, and the direction the storm will move from. It is not dependent on the orientation of the ground-relative winds. Storm motion speed generally follows the methodology outlined in Bunkers et al. (2000). + + 1 - "X_area_fraction" means the fraction of horizontal area occupied by X. Cloud area fraction is also called "cloud amount" and "cloud cover". The cloud area fraction is for the whole atmosphere column, as seen from the surface or the top of the atmosphere. The cloud area fraction in a layer of the atmosphere has the standard name cloud_area_fraction_in_atmosphere_layer. 
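The flag entries above describe how a flag variable is tied to its data variable through the ancillary_variables attribute, with flag_values and flag_meanings supplying the encoding. A netCDF4-python sketch; the names and the three-value scheme are illustrative.

```python
# Sketch of the ancillary_variables linkage for a quality flag.
import numpy as np
from netCDF4 import Dataset

with Dataset("flags.nc", "w") as ds:
    ds.createDimension("time", 3)

    sst = ds.createVariable("sst", "f4", ("time",))
    sst.standard_name = "sea_surface_temperature"
    sst.units = "K"
    sst.ancillary_variables = "sst_flag"  # links the data to its flag
    sst[:] = [285.2, 285.4, 285.1]

    flag = ds.createVariable("sst_flag", "b", ("time",))
    flag.standard_name = "quality_flag"  # purely quality information
    flag.flag_values = np.array([0, 1, 2], dtype="b")
    flag.flag_meanings = "good suspect bad"
    flag[:] = [0, 0, 1]
```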
In an atmosphere model, stratiform cloud is that produced by large-scale convergence (not the convection schemes). + "Area fraction" is the fraction of a grid cell's horizontal area that has some characteristic of interest. It is evaluated as the area of interest divided by the grid cell area. It may be expressed as a fraction, a percentage, or any other dimensionless representation of a fraction. The cloud area fraction is for the whole atmosphere column, as seen from the surface or the top of the atmosphere. In an atmosphere model, stratiform cloud is that produced by large-scale convergence (not the convection schemes). Cloud area fraction is also called "cloud amount" and "cloud cover". 1 - "Layer" means any layer with upper and lower boundaries that have constant values in some vertical coordinate. There must be a vertical coordinate variable indicating the extent of the layer(s). If the layers are model layers, the vertical coordinate can be model_level_number, but it is recommended to specify a physical coordinate (in a scalar or auxiliary coordinate variable) as well. "X_area_fraction" means the fraction of horizontal area occupied by X. "X_area" means the horizontal area occupied by X within the grid cell. Cloud area fraction is also called "cloud amount" and "cloud cover". In an atmosphere model, stratiform cloud is that produced by large-scale convergence (not the convection schemes). + "Area fraction" is the fraction of a grid cell's horizontal area that has some characteristic of interest. It is evaluated as the area of interest divided by the grid cell area. It may be expressed as a fraction, a percentage, or any other dimensionless representation of a fraction. "Layer" means any layer with upper and lower boundaries that have constant values in some vertical coordinate. There must be a vertical coordinate variable indicating the extent of the layer(s). If the layers are model layers, the vertical coordinate can be model_level_number, but it is recommended to specify a physical coordinate (in a scalar or auxiliary coordinate variable) as well. Standard names also exist for high, medium and low cloud types. Standard names referring only to "cloud_area_fraction" should be used for quantities for the whole atmosphere column. In an atmosphere model, stratiform cloud is that produced by large-scale convergence (not the convection schemes). Cloud area fraction is also called "cloud amount" and "cloud cover". @@ -18618,6 +21358,13 @@ totals are summed to obtain the index. Emissivity is the ratio of the power emitted by an object to the power that would be emitted by a perfect black body having the same temperature as the object. The emissivity is assumed to be an integral over all wavelengths, unless a coordinate of radiation_wavelength or radiation_frequency is included to specify either the wavelength or frequency. In an atmosphere model, stratiform cloud is that produced by large-scale convergence (not the convection schemes). "longwave" means longwave radiation. + + kg m-2 + + + Stratiform precipitation, whether liquid or frozen, is precipitation that formed in stratiform cloud. Graupel consists of heavily rimed snow particles, often called snow pellets; often indistinguishable from very small soft hail except when the size convention that hail must have a diameter greater than 5 mm is adopted. Reference: American Meteorological Society Glossary http://glossary.ametsoc.org/wiki/Graupel. There are also separate standard names for hail. 
Standard names for "graupel_and_hail" should be used to describe data produced by models that do not distinguish between hail and graupel. "Amount" means mass per unit area. + + kg m-2 s-1 @@ -18797,35 +21544,49 @@ totals are summed to obtain the index. K - The surface called "surface" means the lower boundary of the atmosphere.The brightness temperature of a body is the temperature of a black body which radiates the same power per unit solid angle per unit area. + The surface called "surface" means the lower boundary of the atmosphere. The brightness temperature of a body is the temperature of a black body which radiates the same power per unit solid angle per unit area. It is strongly recommended that a variable with this standard name should have a units_metadata attribute, with one of the values "on-scale" or "difference", whichever is appropriate for the data, because it is essential to know whether the temperature is on-scale (meaning relative to the origin of the scale indicated by the units) or refers to temperature differences (implying that the origin of the temperature scale is irrelevant), in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). + + m2 s-3 + + + A variable quantifying net density gains or losses in air parcel buoyancy based on turbulent heat and moisture fluxes, represented by virtual temperature flux, at the air-sea interface. Positive values indicate a buoyancy flux out of the ocean (into the air) that will destabilize the atmosphere. + + + + m2 s-3 + + + A variable quantifying net density gains or losses in water parcel buoyancy based on thermal (net surface heat flux) and haline (precipitation minus evaporation) forcings at the air-sea interface. A positive value indicates a buoyancy flux into the ocean that will stabilize (i.e., stratify) the surface ocean layer. Pa - The surface called "surface" means the lower boundary of the atmosphere. The chemical formula for carbon dioxide is CO2. In ocean biogeochemistry models, an "abiotic analogue" is used to simulate the effect on a modelled variable when biological effects on ocean carbon concentration and alkalinity are ignored. The partial pressure of a gaseous constituent of air is the pressure which it alone would exert with unchanged temperature and number of moles per unit volume. The partial pressure of a dissolved gas in sea water is the partial pressure in air with which it would be in equilibrium. The partial pressure difference between sea water and air is positive when the partial pressure of the dissolved gas in sea water is greater than the partial pressure in air. + The surface called "surface" means the lower boundary of the atmosphere. The chemical formula for carbon dioxide is CO2. In ocean biogeochemistry models, an "abiotic analogue" is used to simulate the effect on a modelled variable when biological effects on ocean carbon concentration and alkalinity are ignored. The partial pressure of a dissolved gas in sea water is the partial pressure in air with which it would be in equilibrium. The partial pressure of a gaseous constituent of air is the pressure that it would exert if all other gaseous constituents were removed, assuming the volume, the temperature, and its number of moles remain unchanged. The partial pressure difference between sea water and air is positive when the partial pressure of the dissolved gas in sea water is greater than the partial pressure in air. 
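The revised wording above defines the partial pressure of a gaseous constituent as the pressure it would exert with all other constituents removed at unchanged volume, temperature and number of moles. For an ideal gas, an assumption the definition itself does not state, that is p_i = n_i * R * T / V.

```python
# Sketch of the partial-pressure definition above, assuming ideal-gas
# behaviour: p_i = n_i * R * T / V.

R = 8.314462618  # J mol-1 K-1, molar gas constant


def partial_pressure(moles, temperature_k, volume_m3):
    """Partial pressure (Pa) of a constituent occupying volume_m3."""
    return moles * R * temperature_k / volume_m3


# ~0.017 mol of CO2 per m3 of air at 288 K gives ~40 Pa, the order of
# magnitude of present-day surface pCO2.
print(round(partial_pressure(0.0167, 288.0, 1.0), 1))
```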
Pa - The surface called "surface" means the lower boundary of the atmosphere. The chemical formula for carbon dioxide is CO2. In ocean biogeochemistry models, a "natural analogue" is used to simulate the effect on a modelled variable of imposing preindustrial atmospheric carbon dioxide concentrations, even when the model as a whole may be subjected to varying forcings. The partial pressure of a gaseous constituent of air is the pressure which it alone would exert with unchanged temperature and number of moles per unit volume. The partial pressure of a dissolved gas in sea water is the partial pressure in air with which it would be in equilibrium. The partial pressure difference between sea water and air is positive when the partial pressure of the dissolved gas in sea water is greater than the partial pressure in air. + The surface called "surface" means the lower boundary of the atmosphere. The chemical formula for carbon dioxide is CO2. In ocean biogeochemistry models, a "natural analogue" is used to simulate the effect on a modelled variable of imposing preindustrial atmospheric carbon dioxide concentrations, even when the model as a whole may be subjected to varying forcings. The partial pressure of a dissolved gas in sea water is the partial pressure in air with which it would be in equilibrium. The partial pressure of a gaseous constituent of air is the pressure that it would exert if all other gaseous constituents were removed, assuming the volume, the temperature, and its number of moles remain unchanged. The partial pressure difference between sea water and air is positive when the partial pressure of the dissolved gas in sea water is greater than the partial pressure in air. Pa - The surface called "surface" means the lower boundary of the atmosphere. "Water" means water in all phases, including frozen i.e. ice and snow. The partial pressure of a gaseous constituent of air is the pressure which it alone would exert with unchanged temperature and number of moles per unit volume. + The surface called "surface" means the lower boundary of the atmosphere. The chemical formula for carbon dioxide is CO2. The partial pressure of a dissolved gas in sea water is the partial pressure in air with which it would be in equilibrium. The partial pressure of a gaseous constituent of air is the pressure that it would exert if all other gaseous constituents were removed, assuming the volume, the temperature, and its number of moles remain unchanged. The partial pressure difference between air and sea water is positive when the partial pressure in air is greater than the partial pressure of the dissolved gas in sea water. Pa - The partial pressure of a dissolved gas in sea water is the partial pressure in air with which it would be in equilibrium. The partial pressure of a gaseous constituent of air is the pressure which it alone would exert with unchanged temperature and number of moles per unit volume. The surface called "surface" means the lower boundary of the atmosphere. The chemical formula for carbon dioxide is CO2. + The surface called "surface" means the lower boundary of the atmosphere. The chemical formula for carbon dioxide is CO2. The partial pressure of a dissolved gas in sea water is the partial pressure in air with which it would be in equilibrium. The partial pressure of a gaseous constituent of air is the pressure that it would exert if all other gaseous constituents were removed, assuming the volume, the temperature, and its number of moles remain unchanged. 
The partial pressure difference between sea water and air is positive when the partial pressure of the dissolved gas in sea water is greater than the partial pressure in air. @@ -18856,6 +21617,13 @@ totals are summed to obtain the index. The surface called "surface" means the lower boundary of the atmosphere. "Diffuse" radiation is radiation that has been scattered by gas molecules in the atmosphere and by particles such as cloud droplets and aerosols. The term "shortwave" means shortwave radiation. Hemispherical reflectance is the ratio of the energy of the reflected to the incident radiation. This term gives the fraction of the surface_diffuse_downwelling_shortwave_flux_in_air which is reflected. If the diffuse radiation is isotropic, this term is equivalent to the integral of surface_bidirectional_reflectance over all incident angles and over all outgoing angles in the hemisphere above the surface. A coordinate variable of radiation_wavelength or radiation_frequency can be used to specify the wavelength or frequency, respectively, of the radiation. Shortwave hemispherical reflectance is related to albedo, but albedo is defined in terms of the fraction of the full spectrum of incident solar radiation which is reflected. It is related to the hemispherical reflectance averaged over all wavelengths using a weighting proportional to the incident radiative flux. + + W m-2 + + + The surface called "surface" means the lower boundary of the atmosphere. "Direct" (also known as "beam") radiation is radiation that has followed a direct path from the sun and is alternatively known as "direct insolation". The phrase "along_beam" refers to direct radiation on a plane perpendicular to the direction of the sun. This is in contrast to standard names such as direct_downwelling_shortwave_flux_in_air, where the radiation falls on a horizontal plane at the earth surface. The term "shortwave" means shortwave radiation. In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. The quantity with standard name surface_direct_along_beam_shortwave_flux_in_air is also called Direct Normal Irradiance (DNI) in the solar energy industry. + + W m-2 @@ -18874,14 +21642,28 @@ totals are summed to obtain the index. Pa E180 tauu - The surface called "surface" means the lower boundary of the atmosphere. "Eastward" indicates a vector component which is positive when directed eastward (negative westward). "Downward" indicates a vector component which is positive when directed downward (negative upward). "Downward eastward" indicates the ZX component of a tensor. A downward eastward stress is a downward flux of eastward momentum, which accelerates the lower medium eastward and the upper medium westward. The surface downward stress is the windstress on the surface. + The surface called "surface" means the lower boundary of the atmosphere. "Surface stress" means the shear stress (force per unit area) exerted by the wind at the surface. A downward stress is a downward flux of momentum. Over large bodies of water, wind stress can drive near-surface currents. "Downward" indicates a vector component which is positive when directed downward (negative upward). "Eastward" indicates a vector component which is positive when directed eastward (negative westward). "Downward eastward" indicates the ZX component of a tensor. A downward eastward stress is a downward flux of eastward momentum, which accelerates the lower medium eastward and the upper medium westward. 
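Several of the definitions above fix the sign convention for a partial pressure difference between sea water and air. A minimal sketch of that convention follows; the variable names and values are illustrative, not CF metadata.

```python
# Sign convention from the definitions above: the "difference between sea
# water and air" is positive when the dissolved gas in sea water has the
# higher partial pressure.
pco2_sea_water = 41.5  # Pa, partial pressure of CO2 dissolved in sea water
pco2_air = 40.0        # Pa, partial pressure of CO2 in the overlying air

# "between sea water and air" => sea water minus air
delta_pco2 = pco2_sea_water - pco2_air
assert delta_pco2 > 0  # positive: the ocean would outgas CO2 to the air
```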
Pa - The surface called "surface" means the lower boundary of the atmosphere. "Downward" indicates a vector component which is positive when directed downward (negative upward). "Eastward" indicates a vector component which is positive when directed eastward (negative westward). "Downward eastward" indicates the ZX component of a tensor. A downward eastward stress is a downward flux of eastward momentum, which accelerates the lower medium eastward and the upper medium westward. The surface downward stress is the wind stress on the surface. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Boundary layer mixing" means turbulent motions that transport heat, water, momentum and chemical constituents within the atmospheric boundary layer and affect exchanges between the surface and the atmosphere. The atmospheric boundary layer is typically characterised by a well-mixed sub-cloud layer of order 500 metres, and by a more extended conditionally unstable layer with boundary-layer clouds up to 2 km. (Reference: IPCC Third Assessment Report, Working Group 1: The Scientific Basis, 7.2.2.3, https://www.ipcc.ch/ipccreports/tar/wg1/273.htm). + The surface called "surface" means the lower boundary of the atmosphere. "Surface stress" means the shear stress (force per unit area) exerted by the wind at the surface. A downward stress is a downward flux of momentum. Over large bodies of water, wind stress can drive near-surface currents. "Downward" indicates a vector component which is positive when directed downward (negative upward). "Eastward" indicates a vector component which is positive when directed eastward (negative westward). "Downward eastward" indicates the ZX component of a tensor. A downward eastward stress is a downward flux of eastward momentum, which accelerates the lower medium eastward and the upper medium westward. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Boundary layer mixing" means turbulent motions that transport heat, water, momentum and chemical constituents within the atmospheric boundary layer and affect exchanges between the surface and the atmosphere. The atmospheric boundary layer is typically characterised by a well-mixed sub-cloud layer of order 500 metres, and by a more extended conditionally unstable layer with boundary-layer clouds up to 2 km. (Reference: IPCC Third Assessment Report, Working Group 1: The Scientific Basis, 7.2.2.3, https://archive.ipcc.ch/ipccreports/tar/wg1/273.htm). + + + + Pa + + + The surface called "surface" means the lower boundary of the atmosphere. "Surface stress" means the shear stress (force per unit area) exerted by the wind at the surface. A downward stress is a downward flux of momentum. Over large bodies of water, wind stress can drive near-surface currents. "Downward" indicates a vector component which is positive when directed downward (negative upward). "Eastward" indicates a vector component which is positive when directed eastward (negative westward). "Downward eastward" indicates the ZX component of a tensor. A downward eastward stress is a downward flux of eastward momentum, which accelerates the lower medium eastward and the upper medium westward.
The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Viscosity" means the stress associated with viscous effects at the sea surface and is equivalent to the turbulent stress just outside the viscous sublayer. + + + + Pa + + + The surface called "surface" means the lower boundary of the atmosphere. "Surface stress" means the shear stress (force per unit area) exerted by the wind at the surface. A downward stress is a downward flux of momentum. Over large bodies of water, wind stress can drive near-surface currents. "Downward" indicates a vector component which is positive when directed downward (negative upward). "Eastward" indicates a vector component which is positive when directed eastward (negative westward). "Downward eastward" indicates the ZX component of a tensor. A downward eastward stress is a downward flux of eastward momentum, which accelerates the lower medium eastward and the upper medium westward. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Sea surface waves" means the stress associated with form drag over sea surface waves. @@ -18961,6 +21743,13 @@ totals are summed to obtain the index. The surface called "surface" means the lower boundary of the atmosphere. "Downward" indicates a vector component which is positive when directed downward (negative upward). In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. In ocean biogeochemistry models, a "natural analogue" is used to simulate the effect on a modelled variable of imposing preindustrial atmospheric carbon dioxide concentrations, even when the model as a whole may be subjected to varying forcings. The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. The chemical formula for carbon dioxide is CO2. + + kg m-2 s-1 + + + The surface called "surface" means the lower boundary of the atmosphere. "Downward" indicates a vector component which is positive when directed downward (negative upward). In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. The chemical formula for methane is CH4. The mass is the total mass of the molecules. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. Non-wetland soils are all soils except for wetlands. Wetlands are areas where water covers the soil, or is present either at or near the surface of the soil all year or for varying periods of time during the year, including during the growing season. The precise conditions under which non-wetland soils produce and consume methane can vary between models. + + kg m-2 s-1 @@ -18986,14 +21775,14 @@ totals are summed to obtain the index. mol m-2 s-1 - The surface called "surface" means the lower boundary of the atmosphere.
"Downward" indicates a vector component which is positive when directed downward (negative upward). In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. The chemical formula of CFC11 is CFCl3. The IUPAC name fof CFC11 is trichloro-fluoro-methane. + The surface called "surface" means the lower boundary of the atmosphere. "Downward" indicates a vector component which is positive when directed downward (negative upward). In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. The chemical formula of CFC11 is CFCl3. The IUPAC name for CFC11 is trichloro(fluoro)methane. mol m-2 s-1 - The surface called "surface" means the lower boundary of the atmosphere. "Downward" indicates a vector component which is positive when directed downward (negative upward). In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. The chemical formula for CFC12 is CF2Cl2. The IUPAC name for CFC12 is dichloro-difluoro-methane. + The surface called "surface" means the lower boundary of the atmosphere. "Downward" indicates a vector component which is positive when directed downward (negative upward). In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. The chemical formula for CFC12 is CF2Cl2. The IUPAC name for CFC12 is dichloro(difluoro)methane. @@ -19014,14 +21803,28 @@ totals are summed to obtain the index. Pa E181 tauv - The surface called "surface" means the lower boundary of the atmosphere. "Northward" indicates a vector component which is positive when directed northward (negative southward). "Downward" indicates a vector component which is positive when directed downward (negative upward). "Downward northward" indicates the ZY component of a tensor. A downward northward stress is a downward flux of northward momentum, which accelerates the lower medium northward and the upper medium southward. The surface downward stress is the windstress on the surface. + The surface called "surface" means the lower boundary of the atmosphere. "Surface stress" means the shear stress (force per unit area) exerted by the wind at the surface. A downward stress is a downward flux of momentum. Over large bodies of water, wind stress can drive near-surface currents. "Downward" indicates a vector component which is positive when directed downward (negative upward). "Northward" indicates a vector component which is positive when directed northward (negative southward). "Downward northward" indicates the ZY component of a tensor. A downward northward stress is a downward flux of northward momentum, which accelerates the lower medium northward and the upper medium southward. Pa - The surface called "surface" means the lower boundary of the atmosphere. "Downward" indicates a vector component which is positive when directed downward (negative upward). "Northward" indicates a vector component which is positive when directed northward (negative southward). Downward northward" indicates the ZY component of a tensor. A downward northward stress is a downward flux of northward momentum, which accelerates the lower medium northward and the upper medium southward. The surface downward stress is the wind stress on the surface. 
The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Boundary layer mixing" means turbulent motions that transport heat, water, momentum and chemical constituents within the atmospheric boundary layer and affect exchanges between the surface and the atmosphere. The atmospheric boundary layer is typically characterised by a well-mixed sub-cloud layer of order 500 metres, and by a more extended conditionally unstable layer with boundary-layer clouds up to 2 km. (Reference: IPCC Third Assessment Report, Working Group 1: The Scientific Basis, 7.2.2.3, https://www.ipcc.ch/ipccreports/tar/wg1/273.htm). + The surface called "surface" means the lower boundary of the atmosphere. "Surface stress" means the shear stress (force per unit area) exerted by the wind at the surface. A downward stress is a downward flux of momentum. Over large bodies of water, wind stress can drive near-surface currents. "Downward" indicates a vector component which is positive when directed downward (negative upward). "Northward" indicates a vector component which is positive when directed northward (negative southward). "Downward northward" indicates the ZY component of a tensor. A downward northward stress is a downward flux of northward momentum, which accelerates the lower medium northward and the upper medium southward. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Boundary layer mixing" means turbulent motions that transport heat, water, momentum and chemical constituents within the atmospheric boundary layer and affect exchanges between the surface and the atmosphere. The atmospheric boundary layer is typically characterised by a well-mixed sub-cloud layer of order 500 metres, and by a more extended conditionally unstable layer with boundary-layer clouds up to 2 km. (Reference: IPCC Third Assessment Report, Working Group 1: The Scientific Basis, 7.2.2.3, https://archive.ipcc.ch/ipccreports/tar/wg1/273.htm). + + + + Pa + + + The surface called "surface" means the lower boundary of the atmosphere. "Surface stress" means the shear stress (force per unit area) exerted by the wind at the surface. A downward stress is a downward flux of momentum. Over large bodies of water, wind stress can drive near-surface currents. "Downward" indicates a vector component which is positive when directed downward (negative upward). "Northward" indicates a vector component which is positive when directed northward (negative southward). "Downward northward" indicates the ZY component of a tensor. A downward northward stress is a downward flux of northward momentum, which accelerates the lower medium northward and the upper medium southward. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Viscosity" means the stress associated with viscous effects at the sea surface and is equivalent to the turbulent stress just outside the viscous sublayer. + + + + Pa + + + The surface called "surface" means the lower boundary of the atmosphere. "Surface stress" means the shear stress (force per unit area) exerted by the wind at the surface. 
A downward stress is a downward flux of momentum. Over large bodies of water, wind stress can drive near-surface currents. "Downward" indicates a vector component which is positive when directed downward (negative upward). "Northward" indicates a vector component which is positive when directed northward (negative southward). "Downward northward" indicates the ZY component of a tensor. A downward northward stress is a downward flux of northward momentum, which accelerates the lower medium northward and the upper medium southward. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Sea surface waves" means the stress associated with form drag over sea surface waves. @@ -19042,28 +21845,28 @@ totals are summed to obtain the index. Pa - The surface called "surface" means the lower boundary of the atmosphere. "x" indicates a vector component along the grid x-axis, positive with increasing x. "Downward" indicates a vector component which is positive when directed downward (negative upward). + The surface called "surface" means the lower boundary of the atmosphere. "Surface stress" means the shear stress (force per unit area) exerted by the wind at the surface. A downward stress is a downward flux of momentum. Over large bodies of water, wind stress can drive near-surface currents. "Downward" indicates a vector component which is positive when directed downward (negative upward). "x" indicates a vector component along the grid x-axis, positive with increasing x. "Downward x" indicates the ZX component of a tensor. A downward x stress is a downward flux of momentum, which accelerates the lower medium in the direction of increasing x and the upper medium in the direction of decreasing x. Pa - "x" indicates a vector component along the grid x-axis, positive with increasing x. "Downward" indicates a vector component which is positive when directed downward (negative upward). The surface called "surface" means the lower boundary of the atmosphere. A downward x stress is a downward flux of momentum towards the positive direction of the model's x-axis. + The surface called "surface" means the lower boundary of the atmosphere. "Surface stress" means the shear stress (force per unit area) exerted by the wind at the surface. A downward stress is a downward flux of momentum. Over large bodies of water, wind stress can drive near-surface currents. "Downward" indicates a vector component which is positive when directed downward (negative upward). "x" indicates a vector component along the grid x-axis, positive with increasing x. "Downward x" indicates the ZX component of a tensor. A downward x stress is a downward flux of momentum, which accelerates the lower medium in the direction of increasing x and the upper medium in the direction of decreasing x. A positive correction is downward i.e. added to the ocean. Pa - The surface called "surface" means the lower boundary of the atmosphere. "y" indicates a vector component along the grid y-axis, positive with increasing y. "Downward" indicates a vector component which is positive when directed downward (negative upward). + The surface called "surface" means the lower boundary of the atmosphere. "Surface stress" means the shear stress (force per unit area) exerted by the wind at the surface. A downward stress is a downward flux of momentum.
Over large bodies of water, wind stress can drive near-surface currents. "Downward" indicates a vector component which is positive when directed downward (negative upward). "y" indicates a vector component along the grid y-axis, positive with increasing y. "Downward y" indicates the ZY component of a tensor. A downward y stress is a downward flux of momentum, which accelerates the lower medium in the direction of increasing y and the upper medium in the direction of decreasing y. Pa - "y" indicates a vector component along the grid y-axis, positive with increasing y. "Downward" indicates a vector component which is positive when directed downward (negative upward). The surface called "surface" means the lower boundary of the atmosphere. A downward y stress is a downward flux of momentum towards the positive direction of the model's y-axis. + The surface called "surface" means the lower boundary of the atmosphere. "Surface stress" means the shear stress (force per unit area) exerted by the wind at the surface. A downward stress is a downward flux of momentum. Over large bodies of water, wind stress can drive near-surface currents. "Downward" indicates a vector component which is positive when directed downward (negative upward). "y" indicates a vector component along the grid y-axis, positive with increasing y. "Downward y" indicates the ZY component of a tensor. A downward y stress is a downward flux of momentum, which accelerates the lower medium in the direction of increasing y and the upper medium in the direction of decreasing y. A positive correction is downward i.e. added to the ocean. @@ -19080,6 +21883,20 @@ totals are summed to obtain the index. The surface called "surface" means the lower boundary of the atmosphere. Downwelling radiation is radiation from above. It does not mean "net downward". The sign convention is that "upwelling" is positive upwards and "downwelling" is positive downwards. The term "longwave" means longwave radiation. When thought of as being incident on a surface, a radiative flux is sometimes called "irradiance". In addition, it is identical with the quantity measured by a cosine-collector light-meter and sometimes called "vector irradiance". In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. A phrase assuming_condition indicates that the named quantity is the value which would obtain if all aspects of the system were unaltered except for the assumption of the circumstances specified by the condition. "Clear sky" means in the absence of clouds. + + W/m2 + + + The surface called "surface" means the lower boundary of the atmosphere. Upwelling radiation is radiation from below. It does not mean "net upward". The sign convention is that "upwelling" is positive upwards and "downwelling" is positive downwards. The term "longwave" means longwave radiation. When thought of as being incident on a surface, a radiative flux is sometimes called "irradiance". In addition, it is identical with the quantity measured by a cosine-collector light-meter and sometimes called "vector irradiance". In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. A phrase assuming_condition indicates that the named quantity is the value which would obtain if all aspects of the system were unaltered except for the assumption of the circumstances specified by the condition. "Clear sky" means in the absence of clouds.
This 3D ozone field acts as a reference ozone field in a diagnostic call to the model's radiation scheme. It is expressed in terms of mole fraction of ozone in air. It may be observation-based or model-derived. It may be from any time period. By using the same ozone reference in the diagnostic radiation call in two model simulations and calculating differences between the radiative flux diagnostics from the prognostic call to the radiation scheme and the diagnostic call to the radiation scheme with the ozone reference, an instantaneous radiative forcing for ozone can be calculated. + + + + W/m2 + + + The surface called "surface" means the lower boundary of the atmosphere. Downwelling radiation is radiation from above. It does not mean "net downward". The sign convention is that "upwelling" is positive upwards and "downwelling" is positive downwards. The term "longwave" means longwave radiation. When thought of as being incident on a surface, a radiative flux is sometimes called "irradiance". In addition, it is identical with the quantity measured by a cosine-collector light-meter and sometimes called "vector irradiance". In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. A phrase assuming_condition indicates that the named quantity is the value which would obtain if all aspects of the system were unaltered except for the assumption of the circumstances specified by the condition. This 3D ozone field acts as a reference ozone field in a diagnostic call to the model's radiation scheme. It is expressed in terms of mole fraction of ozone in air. It may be observation-based or model-derived. It may be from any time period. By using the same ozone reference in the diagnostic radiation call in two model simulations and calculating differences between the radiative flux diagnostics from the prognostic call to the radiation scheme and the diagnostic call to the radiation scheme with the ozone reference, an instantaneous radiative forcing for ozone can be calculated. + + W m-2 @@ -19241,6 +22058,20 @@ totals are summed to obtain the index. The surface called "surface" means the lower boundary of the atmosphere. Downwelling radiation is radiation from above. It does not mean "net downward". The sign convention is that "upwelling" is positive upwards and "downwelling" is positive downwards. The term "shortwave" means shortwave radiation. Surface downwelling shortwave is the sum of direct and diffuse solar radiation incident on the surface, and is sometimes called "global radiation". When thought of as being incident on a surface, a radiative flux is sometimes called "irradiance". In addition, it is identical with the quantity measured by a cosine-collector light-meter and sometimes called "vector irradiance". In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. A phrase "assuming_condition" indicates that the named quantity is the value which would obtain if all aspects of the system were unaltered except for the assumption of the circumstances specified by the condition. "Clear sky" means in the absence of clouds. + + W/m2 + + + The surface called "surface" means the lower boundary of the atmosphere. Downwelling radiation is radiation from above. It does not mean "net downward". The sign convention is that "upwelling" is positive upwards and "downwelling" is positive downwards. The term "longwave" means longwave radiation. 
When thought of as being incident on a surface, a radiative flux is sometimes called "irradiance". In addition, it is identical with the quantity measured by a cosine-collector light-meter and sometimes called "vector irradiance". In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. A phrase assuming_condition indicates that the named quantity is the value which would obtain if all aspects of the system were unaltered except for the assumption of the circumstances specified by the condition. "Clear sky" means in the absence of clouds. This 3D ozone field acts as a reference ozone field in a diagnostic call to the model's radiation scheme. It is expressed in terms of mole fraction of ozone in air. It may be observation-based or model-derived. It may be from any time period. By using the same ozone reference in the diagnostic radiation call in two model simulations and calculating differences between the radiative flux diagnostics from the prognostic call to the radiation scheme and the diagnostic call to the radiation scheme with the ozone reference, an instantaneous radiative forcing for ozone can be calculated. + + + + W/m2 + + + The surface called "surface" means the lower boundary of the atmosphere. Downwelling radiation is radiation from above. It does not mean "net downward". The sign convention is that "upwelling" is positive upwards and "downwelling" is positive downwards. The term "longwave" means longwave radiation. When thought of as being incident on a surface, a radiative flux is sometimes called "irradiance". In addition, it is identical with the quantity measured by a cosine-collector light-meter and sometimes called "vector irradiance". In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. A phrase assuming_condition indicates that the named quantity is the value which would obtain if all aspects of the system were unaltered except for the assumption of the circumstances specified by the condition. This 3D ozone field acts as a reference ozone field in a diagnostic call to the model's radiation scheme. It is expressed in terms of mole fraction of ozone in air. It may be observation-based or model-derived. It may be from any time period. By using the same ozone reference in the diagnostic radiation call in two model simulations and calculating differences between the radiative flux diagnostics from the prognostic call to the radiation scheme and the diagnostic call to the radiation scheme with the ozone reference, an instantaneous radiative forcing for ozone can be calculated. + + W m-2 @@ -19399,7 +22230,7 @@ totals are summed to obtain the index. Pa - The partial pressure of a dissolved gas in sea water is the partial pressure in air with which it would be in equilibrium. The partial pressure of a gaseous constituent of air is the pressure which it alone would exert with unchanged temperature and number of moles per unit volume. The surface called "surface" means the lower boundary of the atmosphere. + The surface called "surface" means the lower boundary of the atmosphere. The chemical formula for molecular oxygen is O2. The partial pressure of a dissolved gas in sea water is the partial pressure in air with which it would be in equilibrium. 
The partial pressure of a gaseous constituent of air is the pressure that it would exert if all other gaseous constituents were removed, assuming the volume, the temperature, and its number of moles remain unchanged. The partial pressure difference between sea water and air is positive when the partial pressure of the dissolved gas in sea water is greater than the partial pressure in air. @@ -19546,32 +22377,32 @@ totals are summed to obtain the index. Pa - The surface called "surface" means the lower boundary of the atmosphere. The chemical formula for carbon dioxide is CO2. In ocean biogeochemistry models, an "abiotic analogue" is used to simulate the effect on a modelled variable when biological effects on ocean carbon concentration and alkalinity are ignored. The partial pressure of a dissolved gas in sea water is the partial pressure in air with which it would be in equilibrium. The partial pressure of a gaseous constituent of air is the pressure which it alone would exert with unchanged temperature and number of moles per unit volume. + The chemical formula for carbon dioxide is CO2. In ocean biogeochemistry models, an "abiotic analogue" is used to simulate the effect on a modelled variable when biological effects on ocean carbon concentration and alkalinity are ignored. The partial pressure of a dissolved gas in sea water is the partial pressure in air with which it would be in equilibrium. The partial pressure of a gaseous constituent of air is the pressure that it would exert if all other gaseous constituents were removed, assuming the volume, the temperature, and its number of moles remain unchanged. Pa - The surface called "surface" means the lower boundary of the atmosphere. The partial pressure of a gaseous constituent of air is the pressure which it alone would exert with unchanged temperature and number of moles per unit volume. + The surface called "surface" means the lower boundary of the atmosphere. The partial pressure of a gaseous constituent of air is the pressure that it would exert if all other gaseous constituents were removed, assuming the volume, the temperature, and its number of moles remain unchanged. The chemical formula for carbon dioxide is CO2. Pa - The surface called "surface" means the lower boundary of the atmosphere. The partial pressure of a dissolved gas in sea water is the partial pressure in air with which it would be in equilibrium. The partial pressure of a gaseous constituent of air is the pressure which it alone would exert with unchanged temperature and number of moles per unit volume. The chemical formula for carbon dioxide is CO2. + The partial pressure of a dissolved gas in sea water is the partial pressure in air with which it would be in equilibrium. The partial pressure of a gaseous constituent of air is the pressure that it would exert if all other gaseous constituents were removed, assuming the volume, the temperature, and its number of moles remain unchanged. The chemical formula for carbon dioxide is CO2. Pa - The surface called "surface" means the lower boundary of the atmosphere. The chemical formula for carbon dioxide is CO2. In ocean biogeochemistry models, a "natural analogue" is used to simulate the effect on a modelled variable of imposing preindustrial atmospheric carbon dioxide concentrations, even when the model as a whole may be subjected to varying forcings. The partial pressure of a dissolved gas in sea water is the partial pressure in air with which it would be in equilibrium. 
The partial pressure of a gaseous constituent of air is the pressure which it alone would exert with unchanged temperature and number of moles per unit volume. + The chemical formula for carbon dioxide is CO2. In ocean biogeochemistry models, a "natural analogue" is used to simulate the effect on a modelled variable of imposing preindustrial atmospheric carbon dioxide concentrations, even when the model as a whole may be subjected to varying forcings. The partial pressure of a dissolved gas in sea water is the partial pressure in air with which it would be in equilibrium. The partial pressure of a gaseous constituent of air is the pressure that it would exert if all other gaseous constituents were removed, assuming the volume, the temperature, and its number of moles remain unchanged. - + Bq m-2 The surface called "surface" means the lower boundary of the atmosphere. "Content" indicates a quantity per unit area. "Radioactivity" means the number of radioactive decays of a material per second. @@ -22129,17 +24960,24 @@ totals are summed to obtain the index. m - The surface called "surface" means the lower boundary of the atmosphere. + The height above the surface where the mean value of heat assumes its surface value when extrapolated along a logarithmic profile downward towards the surface. The surface called "surface" means the lower boundary of the atmosphere. - + m - The surface called "surface" means the lower boundary of the atmosphere. + The height above the surface where the mean value of humidity assumes its surface value when extrapolated along a logarithmic profile downward towards the surface. The surface called "surface" means the lower boundary of the atmosphere. - + + m + + + The height above the displacement plane at which the mean wind becomes zero when extrapolating the logarithmic wind speed profile downward through the surface layer. The surface called "surface" means the lower boundary of the atmosphere. + + + kg m-2 90 @@ -22153,95 +24991,130 @@ totals are summed to obtain the index. The surface called "surface" means the lower boundary of the atmosphere. Runoff is the liquid water which drains from land. If not specified, "runoff" refers to the sum of surface runoff and subsurface drainage. In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. + + m s-1 + + + A velocity is a vector quantity. "x" indicates a vector component along the grid x-axis, positive with increasing x. Ocean currents are related to phenomena of different nature and processes, such as density currents, currents raised by the wind, tide, wave propagation, mass flow in estuaries, etc. This standard name refers to the sum of currents of all origins. + + + + m s-1 + + + A velocity is a vector quantity. "x" indicates a vector component along the grid x-axis, positive with increasing x. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. Tides are the rise and fall of sea levels caused by the combined effects of the gravitational forces exerted by the Moon and the Sun, and the rotation of the Earth. This rise in water level is accompanied by a horizontal movement of water called the tidal current. + + + + m s-1 + + + A velocity is a vector quantity. "y" indicates a vector component along the grid y-axis, positive with increasing y. 
Ocean currents are related to phenomena of different nature and processes, such as density currents, currents raised by the wind, tide, wave propagation, mass flow in estuaries, etc. This standard name refers to the sum of currents of all origins. + + + + m s-1 + + + A velocity is a vector quantity. "y" indicates a vector component along the grid y-axis, positive with increasing y. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. Tides are the rise and fall of sea levels caused by the combined effects of the gravitational forces exerted by the Moon and the Sun, and the rotation of the Earth. This rise in water level is accompanied by a horizontal movement of water called the tidal current. + + kg m-2 65 snw - The surface called "surface" means the lower boundary of the atmosphere. "Amount" means mass per unit area. Surface amount refers to the amount on the ground, excluding that on the plant or vegetation canopy. + "Amount" means mass per unit area. Surface snow amount refers to the amount on the solid ground or on surface ice cover, but excludes, for example, falling snowflakes and snow on plants. kg m-2 s-1 - In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. The surface called "surface" means the lower boundary of the atmosphere. "Surface snow and ice melt flux" means the mass flux of all melting at the surface. + In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. The surface called "surface" means the lower boundary of the atmosphere. "Surface snow and ice melt flux" means the mass flux of all melting at the surface. Surface snow refers to the snow on the solid ground or on surface ice cover, but excludes, for example, falling snowflakes and snow on plants. W m-2 - The surface called "surface" means the lower boundary of the atmosphere. The snow and ice melt heat flux is the supply of latent heat which is melting snow and ice at freezing point. In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. + The snow and ice melt heat flux is the supply of latent heat which is melting snow and ice at freezing point. Surface snow refers to the snow on the solid ground or on surface ice cover, but excludes, for example, falling snowflakes and snow on plants. In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. kg m-2 s-1 - In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. The surface called "surface" means the lower boundary of the atmosphere. "Surface snow and ice refreezing flux" means the mass flux of surface meltwater which refreezes within the snow or firn. + "Surface snow and ice refreezing flux" means the mass flux of surface meltwater which refreezes within the snow or firn. The surface called "surface" means the lower boundary of the atmosphere. Surface snow refers to the snow on the solid ground or on surface ice cover, but excludes, for example, falling snowflakes and snow on plants. In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics.
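The "due_to_" construction used in the tidal-current definitions above recurs throughout this table: the qualified quantity is a single term in a sum whose total is named without the phrase. A minimal sketch, with illustrative names and values:

```python
# The "due_to_<process>" quantity is one term in a sum of terms; the
# quantity named without the phrase is the total over all processes.
x_velocity_due_to_tides = 0.12             # m s-1, tidal term only
x_velocity_due_to_other_processes = 0.30   # m s-1, all remaining terms

# sea_water_x_velocity ("the sum of currents of all origins") is the total:
sea_water_x_velocity = (x_velocity_due_to_tides
                        + x_velocity_due_to_other_processes)
```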
1 snc - The surface called "surface" means the lower boundary of the atmosphere. "X_area_fraction" means the fraction of horizontal area occupied by X. "X_area" means the horizontal area occupied by X within the grid cell. + "Area fraction" is the fraction of a grid cell's horizontal area that has some characteristic of interest. It is evaluated as the area of interest divided by the grid cell area. It may be expressed as a fraction, a percentage, or any other dimensionless representation of a fraction. Surface snow refers to the snow on the solid ground or on surface ice cover, but excludes, for example, falling snowflakes and snow on plants. 1 - X"_binary_mask" has 1 where condition X is met, 0 elsewhere. The value is 1 where the snow cover area fraction is greater than a threshold, and 0 elsewhere. The threshold must be specified by associating a coordinate variable or scalar coordinate variable with the data variable and giving the coordinate variable a standard name of surface_snow_area_fraction. The values of the coordinate variable are the threshold values for the corresponding subarrays of the data variable. + "X_binary_mask" has 1 where condition X is met, 0 elsewhere. The value is 1 where the snow cover area fraction is greater than a threshold, and 0 elsewhere. The threshold must be specified by associating a coordinate variable or scalar coordinate variable with the data variable and giving the coordinate variable a standard name of surface_snow_area_fraction. The values of the coordinate variable are the threshold values for the corresponding subarrays of the data variable. Surface snow refers to the snow on the solid ground or on surface ice cover, but excludes, for example, falling snowflakes and snow on plants. + + + + kg m-3 + + + Snow density is the density of the snow cover. Surface snow refers to the snow on the solid ground or on surface ice cover, but excludes, for example, falling snowflakes and snow on plants. The density of a substance is its mass per unit volume. kg m-2 99 - The surface called "surface" means the lower boundary of the atmosphere. "Amount" means mass per unit area. + Surface snow refers to the snow on the solid ground or on surface ice cover, but excludes, for example, falling snowflakes and snow on plants. The surface called "surface" means the lower boundary of the atmosphere. "Amount" means mass per unit area. W m-2 - The surface called "surface" means the lower boundary of the atmosphere. Sublimation is the conversion of solid into vapor. The snow melt and sublimation heat flux is the supply of latent heat which converting snow to liquid water (melting) and water vapor (sublimation). In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. + Surface snow refers to the snow on the solid ground or on surface ice cover, but excludes, for example, falling snowflakes and snow on plants. Sublimation is the conversion of solid into vapor. The snow melt and sublimation heat flux is the supply of latent heat which is converting snow to liquid water (melting) and water vapor (sublimation). In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. kg m-2 s-1 snm - The surface called "surface" means the lower boundary of the atmosphere. In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics.
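The binary-mask definition above requires the threshold to travel with the data as a scalar coordinate variable whose standard name is surface_snow_area_fraction. Below is a minimal sketch of one way to encode that, assuming the netCDF4-python package; the file and variable names are illustrative only.

```python
# Encoding the threshold for a snow binary mask as a scalar coordinate
# variable, per the convention described above (names illustrative).
import netCDF4

with netCDF4.Dataset("snow_mask.nc", "w") as ds:
    ds.createDimension("lat", 2)
    ds.createDimension("lon", 2)

    # Scalar coordinate variable holding the threshold value.
    threshold = ds.createVariable("snc_threshold", "f4", ())
    threshold.standard_name = "surface_snow_area_fraction"
    threshold.units = "1"
    threshold.assignValue(0.5)  # mask is 1 where snow fraction > 0.5

    mask = ds.createVariable("snow_mask", "i1", ("lat", "lon"))
    mask.standard_name = "surface_snow_binary_mask"
    mask.units = "1"
    mask.coordinates = "snc_threshold"  # associate the scalar coordinate
    mask[:] = [[0, 1], [1, 1]]
```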
+ Surface snow refers to the snow on the solid ground or on surface ice cover, but excludes, for example, falling snowflakes and snow on plants. The surface called "surface" means the lower boundary of the atmosphere. In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. W m-2 - The surface called "surface" means the lower boundary of the atmosphere. The snow melt heat flux is the supply of latent heat which is melting snow at freezing point. In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. + Surface snow refers to the snow on the solid ground or on surface ice cover, but excludes, for example, falling snowflakes and snow on plants. The snow melt heat flux is the supply of latent heat which is melting snow at freezing point. In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. kg m-2 - The phrase "surface_snow" means snow lying on the surface. "Amount" means mass per unit area. Sublimation is the conversion of solid into vapor. + Surface snow refers to the snow on the solid ground or on surface ice cover, but excludes, for example, falling snowflakes and snow on plants. "Amount" means mass per unit area. Sublimation is the conversion of solid into vapor. W m-2 - The surface called "surface" means the lower boundary of the atmosphere. Sublimation is the conversion of solid into vapor. The snow sublimation heat flux is the supply of latent heat which is causing evaporation of snow to water vapor. In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. + Surface snow refers to the snow on the solid ground or on surface ice cover, but excludes, for example, falling snowflakes and snow on plants. Sublimation is the conversion of solid into vapor. The snow sublimation heat flux is the supply of latent heat which is causing evaporation of snow to water vapor. In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. m 66 snd - The surface called "surface" means the lower boundary of the atmosphere. Unless indicated in the cell_methods attribute, a quantity is assumed to apply to the whole area of each horizontal grid box. Previously, the qualifier where_type was used to specify that the quantity applies only to the part of the grid box of the named type. Names containing the where_type qualifier are deprecated and newly created data should use the cell_methods attribute to indicate the horizontal area to which the quantity applies. + Surface snow refers to the snow on the solid ground or on surface ice cover, but excludes, for example, falling snowflakes and snow on plants. "Thickness" means the vertical extent of a layer. Unless indicated in the cell_methods attribute, a quantity is assumed to apply to the whole area of each horizontal grid box. Previously, the qualifier where_type was used to specify that the quantity applies only to the part of the grid box of the named type. Names containing the where_type qualifier are deprecated and newly created data should use the cell_methods attribute to indicate the horizontal area to which the quantity applies. @@ -22255,14 +25128,21 @@ totals are summed to obtain the index. K E139 ts - The surface called "surface" means the lower boundary of the atmosphere. 
The surface temperature is the temperature at the interface, not the bulk temperature of the medium above or below. Unless indicated in the cell_methods attribute, a quantity is assumed to apply to the whole area of each horizontal grid box. Previously, the qualifier where_type was used to specify that the quantity applies only to the part of the grid box of the named type. Names containing the where_type qualifier are deprecated and newly created data should use the cell_methods attribute to indicate the horizontal area to which the quantity applies. + The surface called "surface" means the lower boundary of the atmosphere. The surface temperature is the temperature at the interface, not the bulk temperature of the medium above or below. Unless indicated in the cell_methods attribute, a quantity is assumed to apply to the whole area of each horizontal grid box. Previously, the qualifier where_type was used to specify that the quantity applies only to the part of the grid box of the named type. Names containing the where_type qualifier are deprecated and newly created data should use the cell_methods attribute to indicate the horizontal area to which the quantity applies. It is strongly recommended that a variable with this standard name should have a units_metadata attribute, with one of the values "on-scale" or "difference", whichever is appropriate for the data, because it is essential to know whether the temperature is on-scale (meaning relative to the origin of the scale indicated by the units) or refers to temperature differences (implying that the origin of the temperature scale is irrelevant), in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). K - The surface called "surface" means the lower boundary of the atmosphere. "anomaly" means difference from climatology. The surface temperature is the (skin) temperature at the interface, not the bulk temperature of the medium above or below. + The surface called "surface" means the lower boundary of the atmosphere. "anomaly" means difference from climatology. The surface temperature is the (skin) temperature at the interface, not the bulk temperature of the medium above or below. It is strongly recommended that a variable with this standard name should have the attribute units_metadata="temperature: difference", meaning that it refers to temperature differences and implying that the origin of the temperature scale is irrelevant, because it is essential to know whether a temperature is on-scale or a difference in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). + + + + Pa + + + The surface called "surface" means the lower boundary of the atmosphere. "Surface stress" means the shear stress (force per unit area) exerted at the surface. An upward stress is an upward flux of momentum into the atmosphere. "Upward" indicates a vector component which is positive when directed upward (negative downward). "Eastward" indicates a vector component which is positive when directed eastward (negative westward). "Upward eastward" indicates the ZX component of a tensor.
An upward eastward stress is an upward flux of eastward momentum, which accelerates the upper medium eastward and the lower medium westward. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Sea surface waves" means the stress associated with oscillatory motions of a wavy sea surface. @@ -22286,6 +25166,13 @@ totals are summed to obtain the index. The surface called "surface" means the lower boundary of the atmosphere. "Upward" indicates a vector component which is positive when directed upward (negative downward). The surface latent heat flux is the exchange of heat between the surface and the air on account of evaporation (including sublimation). In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. + + W m-2 + + + The quantity with standard name surface_upward_latent_heat_flux_due_to_evaporation does not include transpiration from vegetation. The surface called "surface" means the lower boundary of the atmosphere. "Upward" indicates a vector component which is positive when directed upward (negative downward). In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. Evaporation is the conversion of liquid or solid into vapor. (The conversion of solid alone into vapor is called "sublimation"). The surface latent heat flux is the exchange of heat between the surface and the air on account of evaporation (including sublimation). + + W m-2 @@ -22475,6 +25362,27 @@ totals are summed to obtain the index. "Upward" indicates a vector component which is positive when directed upward (negative downward). In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. Heterotrophic respiration is respiration by heterotrophs ("consumers"), which are organisms (including animals and decomposers) that consume other organisms or dead organic material, rather than synthesising organic material from inorganic precursors using energy from the environment (especially sunlight) as autotrophs ("producers") do. Heterotrophic respiration goes on within both the soil and litter pools. + + kg m-2 s-1 + + + Methane emitted from the surface, generated by biomass burning (fires). Positive direction upwards. The surface called "surface" means the lower boundary of the atmosphere. "Upward" indicates a vector component which is positive when directed upward (negative downward). In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. The chemical formula for methane is CH4. The mass is the total mass of the molecules. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. 
"Emission" means emission from a primary source located anywhere within the atmosphere, including at the lower boundary (i.e. the surface of the earth). "Emission" is a process entirely distinct from "re-emission" which is used in some standard names. The term "fires" means all biomass fires, whether naturally occurring or ignited by humans. The precise conditions under which fires produce and consume methane can vary between models. + + + + kg m-2 s-1 + + + The surface called "surface" means the lower boundary of the atmosphere. "Upward" indicates a vector component which is positive when directed upward (negative downward). In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. The chemical formula for methane is CH4. The mass is the total mass of the molecules. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Emission" means emission from a primary source located anywhere within the atmosphere, including at the lower boundary (i.e. the surface of the earth). "Emission" is a process entirely distinct from "re-emission" which is used in some standard names. Herbivores are animals that feed on vegetation. Mammals are any vertebrates within the class Mammalia. Examples of large herbivorous mammals include cows, elks, and buffalos. These animals eat grass, tree bark, aquatic vegetation, and shrubby growth. Herbivores can also be medium-sized animals such as sheep and goats, which eat shrubby vegetation and grasses. Small herbivores include rabbits, chipmunks, squirrels, and mice. The precise conditions under which herbivorous mammals produce and consume methane can vary between models. + + + + kg m-2 s-1 + + + The surface called "surface" means the lower boundary of the atmosphere. "Upward" indicates a vector component which is positive when directed upward (negative downward). In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. The chemical formula for methane is CH4. The mass is the total mass of the molecules. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Emission" means emission from a primary source located anywhere within the atmosphere, including at the lower boundary (i.e. the surface of the earth). "Emission" is a process entirely distinct from "re-emission" which is used in some standard names. Termites belong to any of a group of cellulose-eating insects, the social system of which shows remarkable parallels with those of ants and bees, although it has evolved independently. The precise conditions under which termites produce and consume methane can vary between models. + + kg m-2 s-1 @@ -22535,7 +25443,14 @@ totals are summed to obtain the index. mol m-2 s-1 - "Upward" indicates a vector component which is positive when directed upward (negative downward). In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. The surface called "surface" means the lower boundary of the atmosphere. Thechemical formula for dimethyl sulfide is (CH3)2S. Dimethyl sulfide is sometimes referredto as DMS. 
+ The surface called "surface" means the lower boundary of the atmosphere. "Upward" indicates a vector component which is positive when directed upward (negative downward). In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. The chemical formula for dimethyl sulfide is (CH3)2S. Dimethyl sulfide is sometimes referred to as DMS. + + + + Pa + + + The surface called "surface" means the lower boundary of the atmosphere. "Surface stress" means the shear stress (force per unit area) exerted at the surface. An upward stress is an upward flux of momentum into the atmosphere. "Upward" indicates a vector component which is positive when directed upward (negative downward). "Northward" indicates a vector component which is positive when directed northward (negative southward). "Upward northward" indicates the ZY component of a tensor. An upward northward stress is an upward flux of northward momentum, which accelerates the upper medium northward and the lower medium southward. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Sea surface waves" means the stress associated with oscillatory motions of a wavy sea surface. @@ -22573,6 +25488,13 @@ totals are summed to obtain the index. The surface called "surface" means the lower boundary of the atmosphere. Upwelling radiation is radiation from below. It does not mean "net upward". The sign convention is that "upwelling" is positive upwards and "downwelling" is positive downwards. The term "longwave" means longwave radiation. When thought of as being incident on a surface, a radiative flux is sometimes called "irradiance". In addition, it is identical with the quantity measured by a cosine-collector light-meter and sometimes called "vector irradiance". In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. A phrase assuming_condition indicates that the named quantity is the value which would obtain if all aspects of the system were unaltered except for the assumption of the circumstances specified by the condition. "Clear sky" means in the absence of clouds. + + W m-2 + + + The surface called "surface" means the lower boundary of the atmosphere. Upwelling radiation is radiation from below. It does not mean "net upward". The sign convention is that "upwelling" is positive upwards and "downwelling" is positive downwards. The term "longwave" means longwave radiation. When thought of as being incident on a surface, a radiative flux is sometimes called "irradiance". In addition, it is identical with the quantity measured by a cosine-collector light-meter and sometimes called "vector irradiance". In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. A phrase assuming_condition indicates that the named quantity is the value which would obtain if all aspects of the system were unaltered except for the assumption of the circumstances specified by the condition. This 3D ozone field acts as a reference ozone field in a diagnostic call to the model's radiation scheme. It is expressed in terms of mole fraction of ozone in air. It may be observation-based or model-derived. It may be from any time period.
By using the same ozone reference in the diagnostic radiation call in two model simulations and calculating differences between the radiative flux diagnostics from the prognostic call to the radiation scheme and the diagnostic call to the radiation scheme with the ozone reference, an instantaneous radiative forcing for ozone can be calculated. + + mol m-2 s-1 @@ -22671,6 +25593,20 @@ totals are summed to obtain the index. The surface called "surface" means the lower boundary of the atmosphere. Upwelling radiation is radiation from below. It does not mean "net upward". The sign convention is that "upwelling" is positive upwards and "downwelling" is positive downwards. The term "shortwave" means shortwave radiation. When thought of as being incident on a surface, a radiative flux is sometimes called "irradiance". In addition, it is identical with the quantity measured by a cosine-collector light-meter and sometimes called "vector irradiance". In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. A phrase "assuming_condition" indicates that the named quantity is the value which would obtain if all aspects of the system were unaltered except for the assumption of the circumstances specified by the condition. "Clear sky" means in the absence of clouds. + + W m-2 + + + The surface called "surface" means the lower boundary of the atmosphere. Upwelling radiation is radiation from below. It does not mean "net upward". The sign convention is that "upwelling" is positive upwards and "downwelling" is positive downwards. The term "shortwave" means shortwave radiation. When thought of as being incident on a surface, a radiative flux is sometimes called "irradiance". In addition, it is identical with the quantity measured by a cosine-collector light-meter and sometimes called "vector irradiance". In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. A phrase "assuming_condition" indicates that the named quantity is the value which would obtain if all aspects of the system were unaltered except for the assumption of the circumstances specified by the condition. "Clear sky" means in the absence of clouds. This 3D ozone field acts as a reference ozone field in a diagnostic call to the model's radiation scheme. It is expressed in terms of mole fraction of ozone in air. It may be observation-based or model-derived. It may be from any time period. By using the same ozone reference in the diagnostic radiation call in two model simulations and calculating differences between the radiative flux diagnostics from the prognostic call to the radiation scheme and the diagnostic call to the radiation scheme with the ozone reference, an instantaneous radiative forcing for ozone can be calculated. + + + + W m-2 + + + The surface called "surface" means the lower boundary of the atmosphere. Upwelling radiation is radiation from below. It does not mean "net upward". The sign convention is that "upwelling" is positive upwards and "downwelling" is positive downwards. The term "shortwave" means shortwave radiation. When thought of as being incident on a surface, a radiative flux is sometimes called "irradiance". In addition, it is identical with the quantity measured by a cosine-collector light-meter and sometimes called "vector irradiance". In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics.
A phrase "assuming_condition" indicates that the named quantity is the value which would obtain if all aspects of the system were unaltered except for the assumption of the circumstances specified by the condition. This 3D ozone field acts as a reference ozone field in a diagnostic call to the model's radiation scheme. It is expressed in terms of mole fraction of ozone in air. It may be observation-based or model-derived. It may be from any time period. By using the same ozone reference in the diagnostic radiation call in two model simulations and calculating differences between the radiative flux diagnostics from the prognostic call to the radiation scheme and the diagnostic call to the radiation scheme with the ozone reference, an instantaneous radiative forcing for ozone can be calculated. + + kg m-2 @@ -22685,35 +25621,39 @@ totals are summed to obtain the index. The surface called "surface" means the lower boundary of the atmosphere. "Water" means water in all phases, including frozen i.e. ice and snow. Evaporation is the conversion of liquid or solid into vapor. (The conversion of solid alone into vapor is called "sublimation"). The quantity with standard name surface_water_evaporation_flux does not include transpiration from vegetation. In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. Unless indicated in the cell_methods attribute, a quantity is assumed to apply to the whole area of each horizontal grid box. Previously, the qualifier where_type was used to specify that the quantity applies only to the part of the grid box of the named type. Names containing the where_type qualifier are deprecated and newly created data should use the cell_methods attribute to indicate the horizontal area to which the quantity applies. + + 1 + + + A quality flag that reports the result of the Syntax test, which checks that the data contain no indicators of flawed transmission. The linkage between the data variable and this variable is achieved using the ancillary_variables attribute. There are standard names for other specific quality tests which take the form of X_quality_flag. Quality information that does not match any of the specific quantities should be given the more general standard name of quality_flag. + + K - The quantity with standard name temperature_at_base_of_ice_sheet_model is the lower boundary temperature that is used to force ice sheet models. Beneath ice shelves it is the temperature at the ice-ocean interface. Beneath grounded ice, it is the temperature at the ice-bedrock interface. In all instances the temperature is that of the interface itself and not that of the medium above or below the interface. + The quantity with standard name temperature_at_base_of_ice_sheet_model is the lower boundary temperature that is used to force ice sheet models. Beneath ice shelves it is the temperature at the ice-ocean interface. Beneath grounded ice, it is the temperature at the ice-bedrock interface. In all instances the temperature is that of the interface itself and not that of the medium above or below the interface. 
It is strongly recommended that a variable with this standard name should have a units_metadata attribute, with one of the values "on-scale" or "difference", whichever is appropriate for the data, because it is essential to know whether the temperature is on-scale (meaning relative to the origin of the scale indicated by the units) or refers to temperature differences (implying that the origin of the temperature scale is irrelevant), in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). K - The quantity with standard name temperature_at_top_of_ice_sheet_model is the upper boundary temperature that is used to force ice sheet models. It is the temperature at the interface between the ice sheet and the overlying medium which may be snow or the atmosphere. In all instances the temperature is that of the interface itself and not that of the medium above or below the interface. + The quantity with standard name temperature_at_top_of_ice_sheet_model is the upper boundary temperature that is used to force ice sheet models. It is the temperature at the interface between the ice sheet and the overlying medium which may be snow or the atmosphere. In all instances the temperature is that of the interface itself and not that of the medium above or below the interface. It is strongly recommended that a variable with this standard name should have a units_metadata attribute, with one of the values "on-scale" or "difference", whichever is appropriate for the data, because it is essential to know whether the temperature is on-scale (meaning relative to the origin of the scale indicated by the units) or refers to temperature differences (implying that the origin of the temperature scale is irrelevant), in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). K - This quantity is defined as the temperature difference between a parcel of air lifted adiabatically from a starting air pressure to a finishing air pressure in the troposphere and the ambient air temperature at the finishing air pressure in the troposphere. It is -often called the lifted index (LI) and provides a measure of the instability of the atmosphere. The air parcel is "lifted" by moving the air parcel from the starting air pressure to the Lifting Condensation Level (dry adiabatically) and then from the Lifting Condensation Level to the finishing air pressure (wet adiabatically). Air temperature is -the bulk temperature of the air. Coordinate variables of original_air_pressure_of_lifted_parcel and -final_air_pressure_of_lifted_parcel should be specified to indicate the specific air pressures at which the parcel lifting starts (starting air pressure) and the temperature difference is calculated at (finishing air pressure), respectively. + This quantity is defined as the temperature difference between a parcel of air lifted adiabatically from a starting air pressure to a finishing air pressure in the troposphere and the ambient air temperature at the finishing air pressure in the troposphere. It is often called the lifted index (LI) and provides a measure of the instability of the atmosphere. The air parcel is "lifted" by moving the air parcel from the starting air pressure to the Lifting Condensation Level (dry adiabatically) and then from the Lifting Condensation Level to the finishing air pressure (wet adiabatically). Air temperature is the bulk temperature of the air.
Coordinate variables of original_air_pressure_of_lifted_parcel and final_air_pressure_of_lifted_parcel should be specified to indicate the specific air pressures at which the parcel lifting starts (starting air pressure) and the temperature difference is calculated at (finishing air pressure), respectively. It is strongly recommended that a variable with this standard name should have the attribute units_metadata="temperature: difference", meaning that it refers to temperature differences and implying that the origin of the temperature scale is irrelevant, because it is essential to know whether a temperature is on-scale or a difference in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). K - This quantity is defined as the temperature difference between a parcel of air lifted adiabatically from the surface to a finishing air pressure in the troposphere and the ambient air temperature at the finishing air pressure in the troposphere. It is often called the lifted index (LI) and provides a measure of the instability of the atmosphere. The air parcel is "lifted" by moving the air parcel from the surface to the Lifting Condensation Level (dry adiabatically) and then from the Lifting Condensation Level to the finishing air pressure (wet adiabatically). Air temperature is the bulk temperature of the air, not the surface (skin) temperature. The term "surface" means the lower boundary of the atmosphere. A coordinate variable of final_air_pressure_of_lifted_parcel should be specified to indicate the specific air pressure that the temperature difference is calculated at. + This quantity is defined as the temperature difference between a parcel of air lifted adiabatically from the surface to a finishing air pressure in the troposphere and the ambient air temperature at the finishing air pressure in the troposphere. It is often called the lifted index (LI) and provides a measure of the instability of the atmosphere. The air parcel is "lifted" by moving the air parcel from the surface to the Lifting Condensation Level (dry adiabatically) and then from the Lifting Condensation Level to the finishing air pressure (wet adiabatically). Air temperature is the bulk temperature of the air, not the surface (skin) temperature. The term "surface" means the lower boundary of the atmosphere. A coordinate variable of final_air_pressure_of_lifted_parcel should be specified to indicate the specific air pressure that the temperature difference is calculated at. It is strongly recommended that a variable with this standard name should have the attribute units_metadata="temperature: difference", meaning that it refers to temperature differences and implying that the origin of the temperature scale is irrelevant, because it is essential to know whether a temperature is on-scale or a difference in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). @@ -22734,21 +25674,35 @@ final_air_pressure_of_lifted_parcel should be specified to indicate the specific W m-2 - Runoff is the liquid water which drains from land. If not specified, "runoff" refers to the sum of surface runoff and subsurface drainage. The quantity with standard name temperature_flux_due_to_runoff_expressed_as_heat_flux_into_sea_water is the heat carried by the transfer of water into the liquid ocean by the process of runoff. This quantity additonally includes melt water from sea ice and icebergs. 
It is calculated relative to the heat that would be transported by runoff water entering the sea at zero degrees Celsius. It is calculated as the product QrunoffCpTrunoff, where Q runoff is the mass flux of liquid runoff entering the sea water (kg m-2 s-1), Cp is the specific heat capacity of water, and Trunoff is the temperature in degrees Celsius of the runoff water. In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. + The quantity with standard name temperature_flux_due_to_runoff_expressed_as_heat_flux_into_sea_water is the heat carried by the transfer of water into the liquid ocean by the process of runoff. This quantity additionally includes melt water from sea ice and icebergs. It is calculated relative to the heat that would be transported by runoff water entering the sea at zero degrees Celsius. It is calculated as the product QrunoffCpTrunoff, where Qrunoff is the mass flux of liquid runoff entering the sea water (kg m-2 s-1), Cp is the specific heat capacity of water, and Trunoff is the temperature in degrees Celsius of the runoff water. In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. Runoff is the liquid water which drains from land. If not specified, "runoff" refers to the sum of surface runoff and subsurface drainage. + + + + K + + + The temperature at any given depth (or in a layer) below the surface of the ground, excluding surficial snow and ice (but not permafrost or soil). For temperatures in surface lying snow and ice, the more specific standard names temperature_in_surface_snow and land_ice_temperature should be used. For temperatures measured or modelled specifically for the soil layer (the near-surface layer where plants sink their roots) the standard name soil_temperature should be used. It is strongly recommended that a variable with this standard name should have a units_metadata attribute, with one of the values "on-scale" or "difference", whichever is appropriate for the data, because it is essential to know whether the temperature is on-scale (meaning relative to the origin of the scale indicated by the units) or refers to temperature differences (implying that the origin of the temperature scale is irrelevant), in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). K E238 - "Temperature in surface snow" is the bulk temperature of the snow, not the surface (skin) temperature. The surface called "surface" means the lower boundary of the atmosphere. + "Temperature in surface snow" is the bulk temperature of the snow, not the surface (skin) temperature. Surface snow refers to the snow on the solid ground or on surface ice cover, but excludes, for example, falling snowflakes and snow on plants.
It is strongly recommended that a variable with this standard name should have a units_metadata attribute, with one of the values "on-scale" or "difference", whichever is appropriate for the data, because it is essential to know whether the temperature is on-scale (meaning relative to the origin of the scale indicated by the units) or refers to temperature differences (implying that the origin of the temperature scale is irrelevant), in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). + + + + K + + + The temperature_of_analysis_of_sea_water is the reference temperature for the effects of temperature on the measurement of another variable. This temperature should be measured, but may have been calculated, or assumed. For example, the temperature of the sample when measuring pH, or the temperature of equilibration in the case of dissolved gases. The linkage between the data variable and the variable with a standard_name of temperature_of_analysis_of_sea_water is achieved using the ancillary_variables attribute on the data variable. It is strongly recommended that a variable with this standard name should have a units_metadata attribute, with one of the values "on-scale" or "difference", whichever is appropriate for the data, because it is essential to know whether the temperature is on-scale (meaning relative to the origin of the scale indicated by the units) or refers to temperature differences (implying that the origin of the temperature scale is irrelevant), in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). + + K - Temperature_of_sensor_for_oxygen_in_sea_water is the instrument temperature used in calculating the concentration of oxygen in sea water; it is not a measurement of the ambient water temperature. + Temperature_of_sensor_for_oxygen_in_sea_water is the instrument temperature used in calculating the concentration of oxygen in sea water; it is not a measurement of the ambient water temperature. It is strongly recommended that a variable with this standard name should have a units_metadata attribute, with one of the values "on-scale" or "difference", whichever is appropriate for the data, because it is essential to know whether the temperature is on-scale (meaning relative to the origin of the scale indicated by the units) or refers to temperature differences (implying that the origin of the temperature scale is irrelevant), in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). @@ -22769,154 +25723,154 @@ final_air_pressure_of_lifted_parcel should be specified to indicate the specific K s-1 - "tendency_of_X" means derivative of X with respect to time. Air temperature is the bulk temperature of the air, not the surface (skin) temperature. + The phrase "tendency_of_X" means derivative of X with respect to time. Air temperature is the bulk temperature of the air, not the surface (skin) temperature. It is strongly recommended that a variable with this standard name should have the attribute units_metadata="temperature: difference", meaning that it refers to temperature differences and implying that the origin of the temperature scale is irrelevant, because it is essential to know whether a temperature is on-scale or a difference in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units).
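The temperature_of_analysis_of_sea_water and quality-flag entries above both rely on the ancillary_variables linkage together with the recommended units_metadata attribute. As a minimal sketch, assuming the netCDF4-python package (the file name and the variable names ph and t_analysis are illustrative, not prescribed by the table), the linkage could be written like this:

```python
from netCDF4 import Dataset

with Dataset("example.nc", "w") as nc:
    nc.createDimension("time", 3)

    # Data variable, linked to its analysis temperature via the
    # ancillary_variables attribute described in the entries above.
    ph = nc.createVariable("ph", "f4", ("time",))
    ph.standard_name = "sea_water_ph_reported_on_total_scale"
    ph.ancillary_variables = "t_analysis"

    # Ancillary temperature, carrying the strongly recommended
    # units_metadata attribute ("on-scale" here, since the value is
    # relative to the origin of the kelvin scale, not a difference).
    t = nc.createVariable("t_analysis", "f4", ("time",))
    t.standard_name = "temperature_of_analysis_of_sea_water"
    t.units = "K"
    t.units_metadata = "temperature: on-scale"
```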
K s-1 - The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "tendency_of_X" means derivative of X with respect to time. Air temperature is the bulk temperature of the air, not the surface (skin) temperature. + The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "tendency_of_X" means derivative of X with respect to time. Air temperature is the bulk temperature of the air, not the surface (skin) temperature. It is strongly recommended that a variable with this standard name should have the attribute units_metadata="temperature: difference", meaning that it refers to temperature differences and implying that the origin of the temperature scale is irrelevant, because it is essential to know whether a temperature is on-scale or a difference in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). K s-1 - The phrase "tendency_of_X" means derivative of X with respect to time. Air temperature is the bulk temperature of the air, not the surface (skin) temperature. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Boundary layer mixing" means turbulent motions that transport heat, water, momentum and chemical constituents within the atmospheric boundary layer and affect exchanges between the surface and the atmosphere. The atmospheric boundary layer is typically characterised by a well-mixed sub-cloud layer of order 500 metres, and by a more extended conditionally unstable layer with boundary-layer clouds up to 2 km. (Reference: IPCC Third Assessment Report, Working Group 1: The Scientific Basis, 7.2.2.3, https://www.ipcc.ch/ipccreports/tar/wg1/273.htm). + The phrase "tendency_of_X" means derivative of X with respect to time. Air temperature is the bulk temperature of the air, not the surface (skin) temperature. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Boundary layer mixing" means turbulent motions that transport heat, water, momentum and chemical constituents within the atmospheric boundary layer and affect exchanges between the surface and the atmosphere. The atmospheric boundary layer is typically characterised by a well-mixed sub-cloud layer of order 500 metres, and by a more extended conditionally unstable layer with boundary-layer clouds up to 2 km. (Reference: IPCC Third Assessment Report, Working Group 1: The Scientific Basis, 7.2.2.3, https://archive.ipcc.ch/ipccreports/tar/wg1/273.htm). It is strongly recommended that a variable with this standard name should have the attribute units_metadata="temperature: difference", meaning that it refers to temperature differences and implying that the origin of the temperature scale is irrelevant, because it is essential to know whether a temperature is on-scale or a difference in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). 
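The reason the on-scale/difference distinction is repeated throughout these entries is unit conversion: converting kelvin to degrees Celsius applies a 273.15 offset that is correct for on-scale temperatures but wrong for differences. A small illustration with the cf-units package (the units library used by Iris itself):

```python
from cf_units import Unit

kelvin = Unit("K")
celsius = Unit("degC")

# On-scale temperature: the 273.15 offset applies, so 300 K -> 26.85 degC.
print(kelvin.convert(300.0, celsius))

# A temperature *difference* of 2 K is also 2 degC, but a naive unit
# conversion applies the offset anyway and returns -271.15.
# units_metadata is what tells a reader which behaviour is correct.
print(kelvin.convert(2.0, celsius))
```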
K s-1 - Air temperature is the bulk temperature of the air, not the surface (skin) temperature. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "tendency_of_X" means derivative of X with respect to time. + Air temperature is the bulk temperature of the air, not the surface (skin) temperature. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "tendency_of_X" means derivative of X with respect to time. It is strongly recommended that a variable with this standard name should have the attribute units_metadata="temperature: difference", meaning that it refers to temperature differences and implying that the origin of the temperature scale is irrelevant, because it is essential to know whether a temperature is on-scale or a difference in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). K s-1 tnt - The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "tendency_of_X" means derivative of X with respect to time. Air temperature is the bulk temperature of the air, not the surface (skin) temperature. + The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "tendency_of_X" means derivative of X with respect to time. Air temperature is the bulk temperature of the air, not the surface (skin) temperature. It is strongly recommended that a variable with this standard name should have the attribute units_metadata="temperature: difference", meaning that it refers to temperature differences and implying that the origin of the temperature scale is irrelevant, because it is essential to know whether a temperature is on-scale or a difference in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). K s-1 - The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "tendency_of_X" means derivative of X with respect to time. Air temperature is the bulk temperature of the air, not the surface (skin) temperature. + The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "tendency_of_X" means derivative of X with respect to time. Air temperature is the bulk temperature of the air, not the surface (skin) temperature. It is strongly recommended that a variable with this standard name should have the attribute units_metadata="temperature: difference", meaning that it refers to temperature differences and implying that the origin of the temperature scale is irrelevant, because it is essential to know whether a temperature is on-scale or a difference in order to convert the units correctly (cf. 
https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). K s-1 - The phrase "tendency_of_X" means derivative of X with respect to time. Air temperature is the bulk temperature of the air, not the surface (skin) temperature. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Nonorographic" gravity waves refer to gravity waves which are not generated by flow over orography. The dissipation of gravity waves generates heating through an eddy heat flux convergence and through a viscous stress term. + The phrase "tendency_of_X" means derivative of X with respect to time. Air temperature is the bulk temperature of the air, not the surface (skin) temperature. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Nonorographic" gravity waves refer to gravity waves which are not generated by flow over orography. The dissipation of gravity waves generates heating through an eddy heat flux convergence and through a viscous stress term. It is strongly recommended that a variable with this standard name should have the attribute units_metadata="temperature: difference", meaning that it refers to temperature differences and implying that the origin of the temperature scale is irrelevant, because it is essential to know whether a temperature is on-scale or a difference in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). K s-1 - The phrase "tendency_of_X" means derivative of X with respect to time. Air temperature is the bulk temperature of the air, not the surface (skin) temperature. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Orographic gravity waves" refer to gravity waves which are generated by flow over orography. The dissipation of gravity waves generates heating through an eddy heat flux convergence and through a viscous stress term. + The phrase "tendency_of_X" means derivative of X with respect to time. Air temperature is the bulk temperature of the air, not the surface (skin) temperature. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Orographic gravity waves" refer to gravity waves which are generated by flow over orography. The dissipation of gravity waves generates heating through an eddy heat flux convergence and through a viscous stress term. It is strongly recommended that a variable with this standard name should have the attribute units_metadata="temperature: difference", meaning that it refers to temperature differences and implying that the origin of the temperature scale is irrelevant, because it is essential to know whether a temperature is on-scale or a difference in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). 
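Every "due_to_" entry in this block repeats that the named process is a single term in a sum of terms composing the general quantity. A sketch of what that means in practice, assuming an Iris-readable file tendencies.nc containing the total tendency and the component terms named below (the file name and the chosen set of components are illustrative):

```python
from functools import reduce
import operator

import iris

cubes = iris.load("tendencies.nc")
total = cubes.extract_cube("tendency_of_air_temperature")
parts = [
    cubes.extract_cube(f"tendency_of_air_temperature_due_to_{process}")
    for process in ("longwave_heating", "shortwave_heating", "convection")
]

# Each "due_to_" term is one addend of the general quantity, so
# subtracting the listed processes leaves only the terms not named here.
residual = reduce(operator.sub, parts, total)
```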
K s-1 tntdc - The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "tendency_of_X" means derivative of X with respect to time. Air temperature is the bulk temperature of the air, not the surface (skin) temperature. + The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "tendency_of_X" means derivative of X with respect to time. Air temperature is the bulk temperature of the air, not the surface (skin) temperature. It is strongly recommended that a variable with this standard name should have the attribute units_metadata="temperature: difference", meaning that it refers to temperature differences and implying that the origin of the temperature scale is irrelevant, because it is essential to know whether a temperature is on-scale or a difference in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). K s-1 tntlw - The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "tendency_of_X" means derivative of X with respect to time. "longwave" means longwave radiation. Air temperature is the bulk temperature of the air, not the surface (skin) temperature. + The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "tendency_of_X" means derivative of X with respect to time. "longwave" means longwave radiation. Air temperature is the bulk temperature of the air, not the surface (skin) temperature. It is strongly recommended that a variable with this standard name should have the attribute units_metadata="temperature: difference", meaning that it refers to temperature differences and implying that the origin of the temperature scale is irrelevant, because it is essential to know whether a temperature is on-scale or a difference in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). K s-1 - The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. A phrase assuming_condition indicates that the named quantity is the value which would obtain if all aspects of the system were unaltered except for the assumption of the circumstances specified by the condition. "tendency_of_X" means derivative of X with respect to time. "longwave" means longwave radiation. Air temperature is the bulk temperature of the air, not the surface (skin) temperature. + The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. A phrase assuming_condition indicates that the named quantity is the value which would obtain if all aspects of the system were unaltered except for the assumption of the circumstances specified by the condition. "tendency_of_X" means derivative of X with respect to time. 
"longwave" means longwave radiation. Air temperature is the bulk temperature of the air, not the surface (skin) temperature. It is strongly recommended that a variable with this standard name should have the attribute units_metadata="temperature: difference", meaning that it refers to temperature differences and implying that the origin of the temperature scale is irrelevant, because it is essential to know whether a temperature is on-scale or a difference in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). K s-1 - The phrase "tendency_of_X" means derivative of X with respect to time. Air temperature is the bulk temperature of the air, not the surface (skin) temperature. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. The term "longwave" means longwave radiation. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. "Ambient_aerosol" means that the aerosol is measured or modelled at the ambient state of pressure, temperature and relative humidity that exists in its immediate environment. "Ambient aerosol particles" are aerosol particles that have taken up ambient water through hygroscopic growth. The extent of hygroscopic growth depends on the relative humidity and the composition of the particles. To specify the relative humidity and temperature at which the quantity described by the standard name applies, provide scalar coordinate variables with standard names of "relative_humidity" and "air_temperature". Volcanic aerosols include both volcanic ash and secondary products such as sulphate aerosols formed from gaseous emissions of volcanic eruptions. + The phrase "tendency_of_X" means derivative of X with respect to time. Air temperature is the bulk temperature of the air, not the surface (skin) temperature. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. The term "longwave" means longwave radiation. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. "Ambient_aerosol" means that the aerosol is measured or modelled at the ambient state of pressure, temperature and relative humidity that exists in its immediate environment. "Ambient aerosol particles" are aerosol particles that have taken up ambient water through hygroscopic growth. The extent of hygroscopic growth depends on the relative humidity and the composition of the particles. To specify the relative humidity and temperature at which the quantity described by the standard name applies, provide scalar coordinate variables with standard names of "relative_humidity" and "air_temperature". Volcanic aerosols include both volcanic ash and secondary products such as sulphate aerosols formed from gaseous emissions of volcanic eruptions. 
It is strongly recommended that a variable with this standard name should have the attribute units_metadata="temperature: difference", meaning that it refers to temperature differences and implying that the origin of the temperature scale is irrelevant, because it is essential to know whether a temperature is on-scale or a difference in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). K s-1 - Air temperature is the bulk temperature of the air, not the surface (skin) temperature. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "tendency_of_X" means derivative of X with respect to time. + Air temperature is the bulk temperature of the air, not the surface (skin) temperature. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "tendency_of_X" means derivative of X with respect to time. It is strongly recommended that a variable with this standard name should have the attribute units_metadata="temperature: difference", meaning that it refers to temperature differences and implying that the origin of the temperature scale is irrelevant, because it is essential to know whether a temperature is on-scale or a difference in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). K s-1 tntmc - The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "tendency_of_X" means derivative of X with respect to time. Air temperature is the bulk temperature of the air, not the surface (skin) temperature. + The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "tendency_of_X" means derivative of X with respect to time. Air temperature is the bulk temperature of the air, not the surface (skin) temperature. It is strongly recommended that a variable with this standard name should have the attribute units_metadata="temperature: difference", meaning that it refers to temperature differences and implying that the origin of the temperature scale is irrelevant, because it is essential to know whether a temperature is on-scale or a difference in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). K s-1 - The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "tendency_of_X" means derivative of X with respect to time. Air temperature is the bulk temperature of the air, not the surface (skin) temperature. + The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "tendency_of_X" means derivative of X with respect to time. 
Air temperature is the bulk temperature of the air, not the surface (skin) temperature. It is strongly recommended that a variable with this standard name should have the attribute units_metadata="temperature: difference", meaning that it refers to temperature differences and implying that the origin of the temperature scale is irrelevant, because it is essential to know whether a temperature is on-scale or a difference in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). K s-1 tntsw - The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "tendency_of_X" means derivative of X with respect to time. "shortwave" means shortwave radiation. Air temperature is the bulk temperature of the air, not the surface (skin) temperature. + The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "tendency_of_X" means derivative of X with respect to time. "shortwave" means shortwave radiation. Air temperature is the bulk temperature of the air, not the surface (skin) temperature. It is strongly recommended that a variable with this standard name should have the attribute units_metadata="temperature: difference", meaning that it refers to temperature differences and implying that the origin of the temperature scale is irrelevant, because it is essential to know whether a temperature is on-scale or a difference in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). K s-1 - The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. A phrase assuming_condition indicates that the named quantity is the value which would obtain if all aspects of the system were unaltered except for the assumption of the circumstances specified by the condition. "tendency_of_X" means derivative of X with respect to time. "shortwave" means shortwave radiation. Air temperature is the bulk temperature of the air, not the surface (skin) temperature. + The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. A phrase assuming_condition indicates that the named quantity is the value which would obtain if all aspects of the system were unaltered except for the assumption of the circumstances specified by the condition. "tendency_of_X" means derivative of X with respect to time. "shortwave" means shortwave radiation. Air temperature is the bulk temperature of the air, not the surface (skin) temperature. It is strongly recommended that a variable with this standard name should have the attribute units_metadata="temperature: difference", meaning that it refers to temperature differences and implying that the origin of the temperature scale is irrelevant, because it is essential to know whether a temperature is on-scale or a difference in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). 
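The ozone-reference entries earlier in this section describe a double-call method: within each simulation, difference the flux from the prognostic radiation call against the flux from the diagnostic call that uses the fixed ozone reference, then compare those differences between two simulations. As plain arithmetic (the function name and the numbers are made up for illustration):

```python
import numpy as np

def call_difference(prognostic_flux, diagnostic_flux_with_reference):
    """Prognostic-call flux minus diagnostic-call flux (W m-2),
    the per-simulation quantity described in the ozone entries."""
    return np.asarray(prognostic_flux) - np.asarray(diagnostic_flux_with_reference)

# Same ozone reference used in the diagnostic call of both runs:
delta_run1 = call_difference(241.3, 243.0)
delta_run2 = call_difference(240.1, 243.5)

# Differencing the two deltas isolates an instantaneous radiative
# forcing attributable to the ozone change between the runs.
ozone_forcing = delta_run1 - delta_run2
```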
K s-1 - The phrase "tendency_of_X" means derivative of X with respect to time. Air temperature is the bulk temperature of the air, not the surface (skin) temperature. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. The term "shortwave" means shortwave radiation. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. "Ambient aerosol particles" are aerosol particles that have taken up ambient water through hygroscopic growth. The extent of hygroscopic growth depends on the relative humidity and the composition of the particles. To specify the relative humidity and temperature at which the quantity described by the standard name applies, provide scalar coordinate variables with standard names of "relative_humidity" and "air_temperature". Volcanic aerosols include both volcanic ash and secondary products such as sulphate aerosols formed from gaseous emissions of volcanic eruptions. + The phrase "tendency_of_X" means derivative of X with respect to time. Air temperature is the bulk temperature of the air, not the surface (skin) temperature. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. The term "shortwave" means shortwave radiation. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. "Ambient aerosol particles" are aerosol particles that have taken up ambient water through hygroscopic growth. The extent of hygroscopic growth depends on the relative humidity and the composition of the particles. To specify the relative humidity and temperature at which the quantity described by the standard name applies, provide scalar coordinate variables with standard names of "relative_humidity" and "air_temperature". Volcanic aerosols include both volcanic ash and secondary products such as sulphate aerosols formed from gaseous emissions of volcanic eruptions. It is strongly recommended that a variable with this standard name should have the attribute units_metadata="temperature: difference", meaning that it refers to temperature differences and implying that the origin of the temperature scale is irrelevant, because it is essential to know whether a temperature is on-scale or a difference in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). K s-1 - The phrase "tendency_of_X" means derivative of X with respect to time. Air temperature is the bulk temperature of the air, not the surface (skin) temperature. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. In an atmosphere model, stratiform cloud is that produced by large-scale convergence (not the convection schemes). "Precipitation" in the earth's atmosphere means precipitation of water in all phases. 
A variable with the standard name tendency_of_air_temperature_due_to_stratiform_cloud_and_precipitation should contain net latent heating effects of all processes which convert stratiform clouds and precipitation between water vapor, liquid or ice phases. + The phrase "tendency_of_X" means derivative of X with respect to time. Air temperature is the bulk temperature of the air, not the surface (skin) temperature. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. In an atmosphere model, stratiform cloud is that produced by large-scale convergence (not the convection schemes). "Precipitation" in the earth's atmosphere means precipitation of water in all phases. A variable with the standard name tendency_of_air_temperature_due_to_stratiform_cloud_and_precipitation should contain net latent heating effects of all processes which convert stratiform clouds and precipitation between water vapor, liquid or ice phases. It is strongly recommended that a variable with this standard name should have the attribute units_metadata="temperature: difference", meaning that it refers to temperature differences and implying that the origin of the temperature scale is irrelevant, because it is essential to know whether a temperature is on-scale or a difference in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). K s-1 - The phrase "tendency_of_X" means derivative of X with respect to time. Air temperature is the bulk temperature of the air, not the surface (skin) temperature. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. In an atmosphere model, stratiform cloud is that produced by large-scale convergence (not the convection schemes). "Precipitation" in the earth's atmosphere means precipitation of water in all phases. "Boundary layer mixing" means turbulent motions that transport heat, water, momentum and chemical constituents within the atmospheric boundary layer and affect exchanges between the surface and the atmosphere. The atmospheric boundary layer is typically characterised by a well-mixed sub-cloud layer of order 500 metres, and by a more extended conditionally unstable layer with boundary-layer clouds up to 2 km. (Reference: IPCC Third Assessment Report, Working Group 1: The Scientific Basis, 7.2.2.3, https://www.ipcc.ch/ipccreports/tar/wg1/273.htm). + The phrase "tendency_of_X" means derivative of X with respect to time. Air temperature is the bulk temperature of the air, not the surface (skin) temperature. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. In an atmosphere model, stratiform cloud is that produced by large-scale convergence (not the convection schemes). "Precipitation" in the earth's atmosphere means precipitation of water in all phases. "Boundary layer mixing" means turbulent motions that transport heat, water, momentum and chemical constituents within the atmospheric boundary layer and affect exchanges between the surface and the atmosphere. 
The atmospheric boundary layer is typically characterised by a well-mixed sub-cloud layer of order 500 metres, and by a more extended conditionally unstable layer with boundary-layer clouds up to 2 km. (Reference: IPCC Third Assessment Report, Working Group 1: The Scientific Basis, 7.2.2.3, https://archive.ipcc.ch/ipccreports/tar/wg1/273.htm). It is strongly recommended that a variable with this standard name should have the attribute units_metadata="temperature: difference", meaning that it refers to temperature differences and implying that the origin of the temperature scale is irrelevant, because it is essential to know whether a temperature is on-scale or a difference in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). K s-1 tntlsp - The phrase "tendency_of_X" means derivative of X with respect to time. Air temperature is the bulk temperature of the air, not the surface (skin) temperature. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. In an atmosphere model, stratiform cloud is that produced by large-scale convergence (not the convection schemes). "Precipitation" in the earth's atmosphere means precipitation of water in all phases. + The phrase "tendency_of_X" means derivative of X with respect to time. Air temperature is the bulk temperature of the air, not the surface (skin) temperature. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. In an atmosphere model, stratiform cloud is that produced by large-scale convergence (not the convection schemes). "Precipitation" in the earth's atmosphere means precipitation of water in all phases. It is strongly recommended that a variable with this standard name should have the attribute units_metadata="temperature: difference", meaning that it refers to temperature differences and implying that the origin of the temperature scale is irrelevant, because it is essential to know whether a temperature is on-scale or a difference in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). K s-1 - The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "tendency_of_X" means derivative of X with respect to time. Air temperature is the bulk temperature of the air, not the surface (skin) temperature. + The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "tendency_of_X" means derivative of X with respect to time. Air temperature is the bulk temperature of the air, not the surface (skin) temperature. It is strongly recommended that a variable with this standard name should have the attribute units_metadata="temperature: difference", meaning that it refers to temperature differences and implying that the origin of the temperature scale is irrelevant, because it is essential to know whether a temperature is on-scale or a difference in order to convert the units correctly (cf. 
https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units).
@@ -23497,49 +26451,49 @@ final_air_pressure_of_lifted_parcel should be specified to indicate the specific
kg m-2 s-1 - "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "tendency_of_X" means derivative of X with respect to time. "Emission" means emission from a primary source located anywhere within the atmosphere, including at the lower boundary (i.e. earth's surface). "Emission" is a process entirely distinct from "re-emission" which is used in some standard names. The chemical formula of carbon tetrachloride is CCl4. + The phrase "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Emission" means emission from a primary source located anywhere within the atmosphere, including at the lower boundary (i.e. earth's surface). "Emission" is a process entirely distinct from "re-emission" which is used in some standard names. The chemical formula of carbon tetrachloride is CCl4. The IUPAC name for carbon tetrachloride is tetrachloromethane. kg m-2 s-1 - The phrase "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Emission" means emission from a primary source located anywhere within the atmosphere, including at the lower boundary (i.e. earth's surface). "Emission" is a process entirely distinct from "re-emission" which is used in some standard names. The chemical formula of CFC113a CCl3CF3. The IUPAC name for CFC113a is 1,1,1-trichloro-2,2,2-trifluoro-ethane. + The phrase "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. The mass is the total mass of the molecules. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase.
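The units_metadata recommendation repeated in the entries above is an ordinary string attribute as far as the file format is concerned. A netCDF4 sketch, with hypothetical file and variable names:

import netCDF4

ds = netCDF4.Dataset("tendencies.nc", "w")  # hypothetical output file
ds.createDimension("time", None)
tend = ds.createVariable("dtdt", "f4", ("time",))  # hypothetical variable name
tend.standard_name = "tendency_of_air_temperature"
tend.units = "K s-1"
# "temperature: difference" marks the values as temperature differences per
# unit time, so unit converters rescale (e.g. K to degC leaves the values
# unchanged) instead of applying the 273.15 on-scale offset.
tend.units_metadata = "temperature: difference"
ds.close()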
"Emission" means emission from a primary source located anywhere within the atmosphere, including at the lower boundary (i.e. earth's surface). "Emission" is a process entirely distinct from "re-emission" which is used in some standard names. The chemical formula of CFC113a is CCl3CF3. The IUPAC name for CFC113a is 1,1,1-trichloro-2,2,2-trifluoroethane. kg m-2 s-1 - The phrase "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Emission" means emission from a primary source located anywhere within the atmosphere, including at the lower boundary (i.e. earth's surface). "Emission" is a process entirely distinct from "re-emission" which is used in some standard names. The chemical formula of CFC113 is CCl2FCClF2. The IUPAC name for CFC113 is 1,1,2-trichloro-1,2,2-trifluoro-ethane. + The phrase "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. The mass is the total mass of the molecules. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Emission" means emission from a primary source located anywhere within the atmosphere, including at the lower boundary (i.e. earth's surface). "Emission" is a process entirely distinct from "re-emission" which is used in some standard names. The chemical formula of CFC113 is CCl2FCClF2. The IUPAC name for CFC113 is 1,1,2-trichloro-1,2,2-trifluoroethane. kg m-2 s-1 - The phrase "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Emission" means emission from a primary source located anywhere within the atmosphere, including at the lower boundary (i.e. earth's surface). "Emission" is a process entirely distinct from "re-emission" which is used in some standard names. The chemical formula of CFC114 is CClF2CClF2. The IUPAC name for CFC114 is 1,2-dichloro-1,1,2,2-tetrafluoro-ethane. + The phrase "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. 
For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. The mass is the total mass of the molecules. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Emission" means emission from a primary source located anywhere within the atmosphere, including at the lower boundary (i.e. earth's surface). "Emission" is a process entirely distinct from "re-emission" which is used in some standard names. The chemical formula of CFC114 is CClF2CClF2. The IUPAC name for CFC114 is 1,2-dichloro-1,1,2,2-tetrafluoroethane. kg m-2 s-1 - The phrase "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Emission" means emission from a primary source located anywhere within the atmosphere, including at the lower boundary (i.e. earth's surface). "Emission" is a process entirely distinct from "re-emission" which is used in some standard names. The chemical formula of CFC115 is CClF2CF3. The IUPAC name for CFC115 is 1-chloro-1,1,2,2,2-pentafluoro-ethane. + The phrase "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. The mass is the total mass of the molecules. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Emission" means emission from a primary source located anywhere within the atmosphere, including at the lower boundary (i.e. earth's surface). "Emission" is a process entirely distinct from "re-emission" which is used in some standard names. The chemical formula of CFC115 is CClF2CF3. The IUPAC name for CFC115 is 1-chloro-1,1,2,2,2-pentafluoroethane. kg m-2 s-1 - The phrase "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Emission" means emission from a primary source located anywhere within the atmosphere, including at the lower boundary (i.e. earth's surface). "Emission" is a process entirely distinct from "re-emission" which is used in some standard names. The chemical formula of CFC11 is CFCl3.
The IUPAC name fof CFC11 is trichloro-fluoro-methane. + The phrase "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. The mass is the total mass of the molecules. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Emission" means emission from a primary source located anywhere within the atmosphere, including at the lower boundary (i.e. earth's surface). "Emission" is a process entirely distinct from "re-emission" which is used in some standard names. The chemical formula of CFC11 is CFCl3. The IUPAC name for CFC11 is trichloro(fluoro)methane. kg m-2 s-1 - The phrase "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Emission" means emission from a primary source located anywhere within the atmosphere, including at the lower boundary (i.e. earth's surface). "Emission" is a process entirely distinct from "re-emission" which is used in some standard names. The chemical formula for CFC12 is CF2Cl2. The IUPAC name for CFC12 is dichloro-difluoro-methane. + The phrase "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. The mass is the total mass of the molecules. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Emission" means emission from a primary source located anywhere within the atmosphere, including at the lower boundary (i.e. earth's surface). "Emission" is a process entirely distinct from "re-emission" which is used in some standard names. The chemical formula for CFC12 is CF2Cl2. The IUPAC name for CFC12 is dichloro(difluoro)methane.
@@ -23672,7 +26626,7 @@ final_air_pressure_of_lifted_parcel should be specified to indicate the specific
kg m-2 s-1 - "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. The mass is the total mass of the particles. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself.
Aerosol takes up ambient water (a process known as hygroscopic growth) depending on the relative humidity and the composition of the aerosol. "Dry aerosol partilces" means aerosol particles without any water uptake. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Emission" means emission from a primary source located anywhere within the atmosphere, including at the lower boundary (i.e. the surface of the earth). "Emission" is a process entirely distinct from "re-emission" which is used in some standard names. "Agricultural waste burning" is the term used in standard names to describe a collection of emission sources. A variable which has this value for the standard_name attribute should be accompanied by a comment attribute which lists the source categories and provides a reference to the categorization scheme, for example, "IPCC (Intergovernmental Panel on Climate Change) source category 4F as defined in the 2006 IPCC guidelines for national greenhouse gas inventories". Chemically, "elemental carbon" is the carbonaceous fraction of particulate matter that is thermally stable in an inert atmosphere to high temperatures near 4000K and can only be gasified by oxidation starting at temperatures above 340 C. It is assumed to be inert and non-volatile under atmospheric conditions and insoluble in any solvent (Ogren and Charlson, 1983). + The phrase "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. The mass is the total mass of the particles. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. Aerosol takes up ambient water (a process known as hygroscopic growth) depending on the relative humidity and the composition of the aerosol. "Dry aerosol particles" means aerosol particles without any water uptake. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Emission" means emission from a primary source located anywhere within the atmosphere, including at the lower boundary (i.e. the surface of the earth). "Emission" is a process entirely distinct from "re-emission" which is used in some standard names. "Agricultural waste burning" is the term used in standard names to describe a collection of emission sources. A variable which has this value for the standard_name attribute should be accompanied by a comment attribute which lists the source categories and provides a reference to the categorization scheme, for example, "IPCC (Intergovernmental Panel on Climate Change) source category 4F as defined in the 2006 IPCC guidelines for national greenhouse gas inventories". Chemically, "elemental carbon" is the carbonaceous fraction of particulate matter that is thermally stable in an inert atmosphere to high temperatures near 4000K and can only be gasified by oxidation starting at temperatures above 340 C. 
It is assumed to be inert and non-volatile under atmospheric conditions and insoluble in any solvent (Ogren and Charlson, 1983).
@@ -24064,252 +27018,252 @@ final_air_pressure_of_lifted_parcel should be specified to indicate the specific
kg m-2 s-1 - "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "tendency_of_X" means derivative of X with respect to time. "Dry deposition" is the sum of turbulent deposition and gravitational settling. The chemical formula for formaldehyde is CH2O. The IUPAC name for formaldehyde is methanal. + The phrase "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Dry deposition" is the sum of turbulent deposition and gravitational settling. The chemical formula for formaldehyde is CH2O. The IUPAC name for formaldehyde is methanal. kg m-2 s-1 - "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "tendency_of_X" means derivative of X with respect to time. "Emission" means emission from a primary source located anywhere within the atmosphere, including at the lower boundary (i.e. earth's surface). "Emission" is a process entirely distinct from "re-emission" which is used in some standard names. The chemical formula for formaldehyde is CH2O. The IUPAC name for formaldehyde is methanal. + The phrase "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Emission" means emission from a primary source located anywhere within the atmosphere, including at the lower boundary (i.e. earth's surface). "Emission" is a process entirely distinct from "re-emission" which is used in some standard names. The chemical formula for formaldehyde is CH2O. The IUPAC name for formaldehyde is methanal.
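The "tendency_of_X" convention used throughout these entries is simply dX/dt. A finite-difference illustration with invented numbers, in the kg m-2 s-1 units of the surrounding entries:

content_t0 = 1.00e-6  # atmosphere mass content at time t0 (kg m-2)
content_t1 = 1.09e-6  # content one hour (3600 s) later (kg m-2)
tendency = (content_t1 - content_t0) / 3600.0  # ~2.5e-11 kg m-2 s-1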
kg m-2 s-1 - "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. The mass is the total mass of the molecules. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Emission" means emission from a primary source located anywhere within the atmosphere, including at the lower boundary (i.e. earth's surface). "Emission" is a process entirely distinct from "re-emission" which is used in some standard names. The chemical formula for formaldehyde is CH2O. The IUPAC name for formaldehyde is methanal. The "agricultural production" sector comprises the agricultural processes of enteric fermentation, manure management, rice cultivation, agricultural soils and other. It may also include any not-classified or "other" combustion, which is commonly included in agriculture-related inventory data. "Agricultural production" is the term used in standard names to describe a collection of emission sources. A variable which has this value for the standard_name attribute should be accompanied by a comment attribute which lists the source categories and provides a reference to the categorization scheme, for example, "IPCC (Intergovernmental Panel on Climate Change) source categories 4A, 4B, 4C, 4D and 4G as defined in the 2006 IPCC guidelines for national greenhouse gas inventories". + The phrase "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. The mass is the total mass of the molecules. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Emission" means emission from a primary source located anywhere within the atmosphere, including at the lower boundary (i.e. earth's surface). "Emission" is a process entirely distinct from "re-emission" which is used in some standard names. The chemical formula for formaldehyde is CH2O. The IUPAC name for formaldehyde is methanal. The "agricultural production" sector comprises the agricultural processes of enteric fermentation, manure management, rice cultivation, agricultural soils and other. It may also include any not-classified or "other" combustion, which is commonly included in agriculture-related inventory data. "Agricultural production" is the term used in standard names to describe a collection of emission sources. A variable which has this value for the standard_name attribute should be accompanied by a comment attribute which lists the source categories and provides a reference to the categorization scheme, for example, "IPCC (Intergovernmental Panel on Climate Change) source categories 4A, 4B, 4C, 4D and 4G as defined in the 2006 IPCC guidelines for national greenhouse gas inventories". kg m-2 s-1 - "tendency_of_X" means derivative of X with respect to time. 
"Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. The mass is the total mass of the molecules. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Emission" means emission from a primary source located anywhere within the atmosphere, including at the lower boundary (i.e. earth's surface). "Emission" is a process entirely distinct from "re-emission" which is used in some standard names. The chemical formula for formaldehyde is CH2O. The IUPAC name for formaldehyde is methanal. The "agricultural waste burning" sector comprises field burning of agricultural residues. "Agricultural waste burning" is the term used in standard names to describe a collection of emission sources. A variable which has this value for the standard_name attribute should be accompanied by a comment attribute which lists the source categories and provides a reference to the categorization scheme, for example, "IPCC (Intergovernmental Panel on Climate Change) source category 4F as defined in the 2006 IPCC guidelines for national greenhouse gas inventories". + The phrase "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. The mass is the total mass of the molecules. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Emission" means emission from a primary source located anywhere within the atmosphere, including at the lower boundary (i.e. earth's surface). "Emission" is a process entirely distinct from "re-emission" which is used in some standard names. The chemical formula for formaldehyde is CH2O. The IUPAC name for formaldehyde is methanal. The "agricultural waste burning" sector comprises field burning of agricultural residues. "Agricultural waste burning" is the term used in standard names to describe a collection of emission sources. A variable which has this value for the standard_name attribute should be accompanied by a comment attribute which lists the source categories and provides a reference to the categorization scheme, for example, "IPCC (Intergovernmental Panel on Climate Change) source category 4F as defined in the 2006 IPCC guidelines for national greenhouse gas inventories". kg m-2 s-1 - "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. The mass is the total mass of the molecules. 
The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Emission" means emission from a primary source located anywhere within the atmosphere, including at the lower boundary (i.e. earth's surface). "Emission" is a process entirely distinct from "re-emission" which is used in some standard names. The chemical formula for formaldehyde is CH2O. The IUPAC name for formaldehyde is methanal. The "energy production and distribution" sector comprises fuel combustion activities related to energy industries and fugitive emissions from fuels. It may also include any not-classified or "other" combustion, which is commonly included in energy-related inventory data. "Energy production and distribution" is the term used in standard names to describe a collection of emission sources. A variable which has this value for the standard_name attribute should be accompanied by a comment attribute which lists the source categories and provides a reference to the categorization scheme, for example, "IPCC (Intergovernmental Panel on Climate Change) source categories 1A1 and 1B as defined in the 2006 IPCC guidelines for national greenhouse gas inventories". + The phrase "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. The mass is the total mass of the molecules. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Emission" means emission from a primary source located anywhere within the atmosphere, including at the lower boundary (i.e. earth's surface). "Emission" is a process entirely distinct from "re-emission" which is used in some standard names. The chemical formula for formaldehyde is CH2O. The IUPAC name for formaldehyde is methanal. The "energy production and distribution" sector comprises fuel combustion activities related to energy industries and fugitive emissions from fuels. It may also include any not-classified or "other" combustion, which is commonly included in energy-related inventory data. "Energy production and distribution" is the term used in standard names to describe a collection of emission sources. A variable which has this value for the standard_name attribute should be accompanied by a comment attribute which lists the source categories and provides a reference to the categorization scheme, for example, "IPCC (Intergovernmental Panel on Climate Change) source categories 1A1 and 1B as defined in the 2006 IPCC guidelines for national greenhouse gas inventories". kg m-2 s-1 - "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. The mass is the total mass of the molecules. 
The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Emission" means emission from a primary source located anywhere within the atmosphere, including at the lower boundary (i.e. earth's surface). "Emission" is a process entirely distinct from "re-emission" which is used in some standard names. The chemical formula for formaldehyde is CH2O. The IUPAC name for formaldehyde is methanal. The "forest fires" sector comprises the burning (natural and human-induced) of living or dead vegetation in forests. "Forest fires" is the term used in standard names to describe a collection of emission sources. A variable which has this value for the standard_name attribute should be accompanied by a comment attribute which lists the source categories and provides a reference to the categorization scheme, for example, "IPCC (Intergovernmental Panel on Climate Change) source category 5 as defined in the 2006 IPCC guidelines for national greenhouse gas Inventories". + The phrase "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. The mass is the total mass of the molecules. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Emission" means emission from a primary source located anywhere within the atmosphere, including at the lower boundary (i.e. earth's surface). "Emission" is a process entirely distinct from "re-emission" which is used in some standard names. The chemical formula for formaldehyde is CH2O. The IUPAC name for formaldehyde is methanal. The "forest fires" sector comprises the burning (natural and human-induced) of living or dead vegetation in forests. "Forest fires" is the term used in standard names to describe a collection of emission sources. A variable which has this value for the standard_name attribute should be accompanied by a comment attribute which lists the source categories and provides a reference to the categorization scheme, for example, "IPCC (Intergovernmental Panel on Climate Change) source category 5 as defined in the 2006 IPCC guidelines for national greenhouse gas Inventories". kg m-2 s-1 - "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. The mass is the total mass of the molecules. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Emission" means emission from a primary source located anywhere within the atmosphere, including at the lower boundary (i.e. earth's surface). "Emission" is a process entirely distinct from "re-emission" which is used in some standard names. The chemical formula for formaldehyde is CH2O. 
The IUPAC name for formaldehyde is methanal. The "industrial processes and combustion" sector comprises fuel combustion activities related to manufacturing industries and construction, industrial processes related to mineral products, the chemical industry, metal production, the production of pulp, paper, food and drink, and non-energy industry use of lubricants and waxes. It may also include any not-classified or "other" combustion, which is commonly included in industry-related inventory data. "Industrial processes and combustion" is the term used in standard names to describe a collection of emission sources. A variable which has this value for the standard_name attribute should be accompanied by a comment attribute which lists the source categories and provides a reference to the categorization scheme, for example, "IPCC (Intergovernmental Panel on Climate Change) source categories 1A2, 2A, 2B, 2C, 2D and 2G as defined in the 2006 IPCC guidelines for national greenhouse gas inventories". + The phrase "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. The mass is the total mass of the molecules. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Emission" means emission from a primary source located anywhere within the atmosphere, including at the lower boundary (i.e. earth's surface). "Emission" is a process entirely distinct from "re-emission" which is used in some standard names. The chemical formula for formaldehyde is CH2O. The IUPAC name for formaldehyde is methanal. The "industrial processes and combustion" sector comprises fuel combustion activities related to manufacturing industries and construction, industrial processes related to mineral products, the chemical industry, metal production, the production of pulp, paper, food and drink, and non-energy industry use of lubricants and waxes. It may also include any not-classified or "other" combustion, which is commonly included in industry-related inventory data. "Industrial processes and combustion" is the term used in standard names to describe a collection of emission sources. A variable which has this value for the standard_name attribute should be accompanied by a comment attribute which lists the source categories and provides a reference to the categorization scheme, for example, "IPCC (Intergovernmental Panel on Climate Change) source categories 1A2, 2A, 2B, 2C, 2D and 2G as defined in the 2006 IPCC guidelines for national greenhouse gas inventories". kg m-2 s-1 - "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. The mass is the total mass of the molecules. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. 
"Emission" means emission from a primary source located anywhere within the atmosphere, including at the lower boundary (i.e. earth's surface). "Emission" is a process entirely distinct from "re-emission" which is used in some standard names. The chemical formula for formaldehyde is CH2O. The IUPAC name for formaldehyde is methanal. The "land transport" sector includes fuel combustion activities related to road transportation, railways and other transportation. "Land transport" is the term used in standard names to describe a collection of emission sources. A variable which has this value for the standard_name attribute should be accompanied by a comment attribute which lists the source categories and provides a reference to the categorization scheme, for example, "IPCC (Intergovernmental Panel on Climate Change) source categories 1A3b, 1A3c and 1A3e as defined in the 2006 IPCC guidelines for national greenhouse gas inventories". + The phrase "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. The mass is the total mass of the molecules. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Emission" means emission from a primary source located anywhere within the atmosphere, including at the lower boundary (i.e. earth's surface). "Emission" is a process entirely distinct from "re-emission" which is used in some standard names. The chemical formula for formaldehyde is CH2O. The IUPAC name for formaldehyde is methanal. The "land transport" sector includes fuel combustion activities related to road transportation, railways and other transportation. "Land transport" is the term used in standard names to describe a collection of emission sources. A variable which has this value for the standard_name attribute should be accompanied by a comment attribute which lists the source categories and provides a reference to the categorization scheme, for example, "IPCC (Intergovernmental Panel on Climate Change) source categories 1A3b, 1A3c and 1A3e as defined in the 2006 IPCC guidelines for national greenhouse gas inventories". kg m-2 s-1 - "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. The mass is the total mass of the molecules. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Emission" means emission from a primary source located anywhere within the atmosphere, including at the lower boundary (i.e. earth's surface). "Emission" is a process entirely distinct from "re-emission" which is used in some standard names. The chemical formula for formaldehyde is CH2O. The IUPAC name for formaldehyde is methanal. 
The "residential and commercial combustion" sector comprises fuel combustion activities related to the commercial/institutional sector, the residential sector and the agriculture/forestry/fishing sector. It may also include any not-classified or "other" combustion, which is commonly included in the inventory data. "Residential and commercial combustion" is the term used in standard names to describe a collection of emission sources. A variable which has this value for the standard_name attribute should be accompanied by a comment attribute which lists the source categories and provides a reference to the categorization scheme, for example, "IPCC (Intergovernmental Panel on Climate Change) source categories 1A4a, 1A4b and 1A4c as defined in the 2006 IPCC guidelines for national greenhouse gas inventories". + The phrase "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. The mass is the total mass of the molecules. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Emission" means emission from a primary source located anywhere within the atmosphere, including at the lower boundary (i.e. earth's surface). "Emission" is a process entirely distinct from "re-emission" which is used in some standard names. The chemical formula for formaldehyde is CH2O. The IUPAC name for formaldehyde is methanal. The "residential and commercial combustion" sector comprises fuel combustion activities related to the commercial/institutional sector, the residential sector and the agriculture/forestry/fishing sector. It may also include any not-classified or "other" combustion, which is commonly included in the inventory data. "Residential and commercial combustion" is the term used in standard names to describe a collection of emission sources. A variable which has this value for the standard_name attribute should be accompanied by a comment attribute which lists the source categories and provides a reference to the categorization scheme, for example, "IPCC (Intergovernmental Panel on Climate Change) source categories 1A4a, 1A4b and 1A4c as defined in the 2006 IPCC guidelines for national greenhouse gas inventories". kg m-2 s-1 - "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. The mass is the total mass of the molecules. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Emission" means emission from a primary source located anywhere within the atmosphere, including at the lower boundary (i.e. earth's surface). "Emission" is a process entirely distinct from "re-emission" which is used in some standard names. The chemical formula for formaldehyde is CH2O. The IUPAC name for formaldehyde is methanal. 
The "savanna and grassland fires" sector comprises the burning (natural and human-induced) of living or dead vegetation in non-forested areas. It excludes field burning of agricultural residues. "Savanna and grassland fires" is the term used in standard names to describe a collection of emission sources. A variable which has this value for the standard_name attribute should be accompanied by a comment attribute which lists the source categories and provides a reference to the categorization scheme, for example, "IPCC (Intergovernmental Panel on Climate Change) source category 5 as defined in the 2006 IPCC guidelines for national greenhouse gas Inventories". + The phrase "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. The mass is the total mass of the molecules. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Emission" means emission from a primary source located anywhere within the atmosphere, including at the lower boundary (i.e. earth's surface). "Emission" is a process entirely distinct from "re-emission" which is used in some standard names. The chemical formula for formaldehyde is CH2O. The IUPAC name for formaldehyde is methanal. The "savanna and grassland fires" sector comprises the burning (natural and human-induced) of living or dead vegetation in non-forested areas. It excludes field burning of agricultural residues. "Savanna and grassland fires" is the term used in standard names to describe a collection of emission sources. A variable which has this value for the standard_name attribute should be accompanied by a comment attribute which lists the source categories and provides a reference to the categorization scheme, for example, "IPCC (Intergovernmental Panel on Climate Change) source category 5 as defined in the 2006 IPCC guidelines for national greenhouse gas Inventories". kg m-2 s-1 - "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. The mass is the total mass of the molecules. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Emission" means emission from a primary source located anywhere within the atmosphere, including at the lower boundary (i.e. earth's surface). "Emission" is a process entirely distinct from "re-emission" which is used in some standard names. The chemical formula for formaldehyde is CH2O. The IUPAC name for formaldehyde is methanal. The "waste treatment and disposal" sector comprises solid waste disposal on land, wastewater handling, waste incineration and other waste disposal. "Waste treatment and disposal" is the term used in standard names to describe a collection of emission sources. 
A variable which has this value for the standard_name attribute should be accompanied by a comment attribute which lists the source categories and provides a reference to the categorization scheme, for example, "IPCC (Intergovernmental Panel on Climate Change) source categories 6A, 6B, 6C and 6D as defined in the 2006 IPCC guidelines for national greenhouse gas inventories". + The phrase "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. The mass is the total mass of the molecules. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Emission" means emission from a primary source located anywhere within the atmosphere, including at the lower boundary (i.e. earth's surface). "Emission" is a process entirely distinct from "re-emission" which is used in some standard names. The chemical formula for formaldehyde is CH2O. The IUPAC name for formaldehyde is methanal. The "waste treatment and disposal" sector comprises solid waste disposal on land, wastewater handling, waste incineration and other waste disposal. "Waste treatment and disposal" is the term used in standard names to describe a collection of emission sources. A variable which has this value for the standard_name attribute should be accompanied by a comment attribute which lists the source categories and provides a reference to the categorization scheme, for example, "IPCC (Intergovernmental Panel on Climate Change) source categories 6A, 6B, 6C and 6D as defined in the 2006 IPCC guidelines for national greenhouse gas inventories". kg m-2 s-1 - "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. The mass is the total mass of the molecules. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Wet deposition" means deposition by precipitation. "tendency_of_X" means derivative of X with respect to time. The chemical formula for formaldehyde is CH2O. The IUPAC name for formaldehyde is methanal. + The phrase "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. The mass is the total mass of the molecules. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Wet deposition" means deposition by precipitation. The chemical formula for formaldehyde is CH2O. The IUPAC name for formaldehyde is methanal. kg m-2 s-1 - "Content" indicates a quantity per unit area. 
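The sector entries above all follow the same bookkeeping rule: the sector phrase lives in the standard name, and a comment attribute enumerates the source categories. A netCDF4 sketch for the land-transport formaldehyde emission entry described above; the file and variable names are hypothetical:

import numpy as np
import netCDF4

ds = netCDF4.Dataset("emissions.nc", "w")  # hypothetical output file
ds.createDimension("lat", 2)
ds.createDimension("lon", 3)
flux = ds.createVariable("emich2o", "f4", ("lat", "lon"))  # hypothetical name
flux.standard_name = (
    "tendency_of_atmosphere_mass_content_of_formaldehyde"
    "_due_to_emission_from_land_transport"
)
flux.units = "kg m-2 s-1"
# The comment attribute lists the source categories and the categorization
# scheme, as the description requires.
flux.comment = (
    "IPCC (Intergovernmental Panel on Climate Change) source categories "
    "1A3b, 1A3c and 1A3e as defined in the 2006 IPCC guidelines for "
    "national greenhouse gas inventories"
)
flux[:] = np.zeros((2, 3), dtype="f4")
ds.close()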
The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "tendency_of_X" means derivative of X with respect to time. "Dry deposition" is the sum of turbulent deposition and gravitational settling. The chemical formula for formic acid is HCOOH. The IUPAC name for formic acid is methanoic acid. + The phrase "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Dry deposition" is the sum of turbulent deposition and gravitational settling. The chemical formula for formic acid is HCOOH. The IUPAC name for formic acid is methanoic acid. kg m-2 s-1 - "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "tendency_of_X" means derivative of X with respect to time. "Wet deposition" means deposition by precipitation. The chemical formula for formic acid is HCOOH. The IUPAC name for formic acid is methanoic acid. + The phrase "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Wet deposition" means deposition by precipitation. The chemical formula for formic acid is HCOOH. The IUPAC name for formic acid is methanoic acid. kg m-2 s-1 - "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. "Divalent mercury" means all compounds in which the mercury has two binding sites to other ion(s) in a salt or to other atom(s) in a molecule. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. 
"tendency_of_X" means derivative of X with respect to time. "Dry deposition"is the sum of turbulent deposition and gravitational settling. + The phrase "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. "Divalent mercury" means all compounds in which the mercury has two binding sites to other ion(s) in a salt or to other atom(s) in a molecule. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Dry deposition" is the sum of turbulent deposition and gravitational settling. kg m-2 s-1 - "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. "Divalent mercury" means all compounds in which the mercury has two binding sites to other ion(s) in a salt or to other atom(s) in a molecule. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Emission" means emission from a primary source located anywhere within the atmosphere, including at the lower boundary (i.e. earth's surface). "Emission" is a process entirely distinct from "re-emission" which is used in some standard names. + The phrase "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. "Divalent mercury" means all compounds in which the mercury has two binding sites to other ion(s) in a salt or to other atom(s) in a molecule. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Emission" means emission from a primary source located anywhere within the atmosphere, including at the lower boundary (i.e. earth's surface). "Emission" is a process entirely distinct from "re-emission" which is used in some standard names. kg m-2 s-1 - "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. "Divalent mercury" means all compounds in which the mercury has two binding sites to other ion(s) in a salt or to other atom(s) in a molecule. "Wet deposition" means deposition by precipitation. + The phrase "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. 
The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Wet deposition" means deposition by precipitation. "Divalent mercury" means all compounds in which the mercury has two binding sites to other ion(s) in a salt or to other atom(s) in a molecule. kg m-2 s-1 - "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "tendency_of_X" means derivative of X with respect to time. "Dry deposition"is the sum of turbulent deposition and gravitational settling. + The phrase "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Dry deposition" is the sum of turbulent deposition and gravitational settling. kg m-2 s-1 - "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. "Emission" means emission from a primary source located anywhere within the atmosphere, including at the lower boundary (i.e. earth's surface). "Emission" is a process entirely distinct from "re-emission" which is used in some standard names. + The phrase "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Emission" means emission from a primary source located anywhere within the atmosphere, including at the lower boundary (i.e. earth's surface). "Emission" is a process entirely distinct from "re-emission" which is used in some standard names. kg m-2 s-1 - "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. "Wet deposition" means deposition by precipitation. + The phrase "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. 
The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Wet deposition" means deposition by precipitation. kg m-2 s-1 - "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "tendency_of_X" means derivative of X with respect to time. "Emission" means emission from a primary source located anywhere within the atmosphere, including at the lower boundary (i.e. earth's surface). "Emission" is a process entirely distinct from "re-emission" which is used in some standard names. The chemical formula for halon1202 is CBr2F2. The IUPAC name for halon1202 is dibromo-difluoro-methane. + The phrase "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. The mass is the total mass of the molecules. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Emission" means emission from a primary source located anywhere within the atmosphere, including at the lower boundary (i.e. earth's surface). "Emission" is a process entirely distinct from "re-emission" which is used in some standard names. The chemical formula for Halon1202 is CBr2F2. The IUPAC name for Halon1202 is dibromo(difluoro)methane. kg m-2 s-1 - "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "tendency_of_X" means derivative of X with respect to time. "Emission" means emission from a primary source located anywhere within the atmosphere, including at the lower boundary (i.e. earth's surface). "Emission" is a process entirely distinct from "re-emission" which is used in some standard names. The chemical formula for halon1211 is CBrClF2. The IUPAC name for halon1211 is bromo-chloro-difluoro-methane. + The phrase "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. The mass is the total mass of the molecules. 
The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Emission" means emission from a primary source located anywhere within the atmosphere, including at the lower boundary (i.e. earth's surface). "Emission" is a process entirely distinct from "re-emission" which is used in some standard names. The chemical formula for Halon1211 is CBrClF2. The IUPAC name for Halon1211 is bromo-chloro-difluoromethane. kg m-2 s-1 - "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "tendency_of_X" means derivative of X with respect to time. "Emission" means emission from a primary source located anywhere within the atmosphere, including at the lower boundary (i.e. earth's surface). "Emission" is a process entirely distinct from "re-emission" which is used in some standard names. The chemical formula for halon1301 is CBrF3. The IUPAC name for halon1301 is bromo-trifluoro-methane. + The phrase "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. The mass is the total mass of the molecules. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Emission" means emission from a primary source located anywhere within the atmosphere, including at the lower boundary (i.e. earth's surface). "Emission" is a process entirely distinct from "re-emission" which is used in some standard names. The chemical formula for Halon1301 is CBrF3. The IUPAC name for Halon1301 is bromo(trifluoro)methane. kg m-2 s-1 - "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "tendency_of_X" means derivative of X with respect to time. "Emission" means emission from a primary source located anywhere within the atmosphere, including at the lower boundary (i.e. earth's surface). "Emission" is a process entirely distinct from "re-emission" which is used in some standard names. The chemical formula for halo2402 is C2Br2F4. The IUPAC name for halon2402 is 1,2-dibromo-1,1,2,2-tetrafluoro-ethane. + The phrase "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. 
The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. The mass is the total mass of the molecules. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Emission" means emission from a primary source located anywhere within the atmosphere, including at the lower boundary (i.e. earth's surface). "Emission" is a process entirely distinct from "re-emission" which is used in some standard names. The chemical formula for Halon2402 is C2Br2F4. The IUPAC name for Halon2402 is 1,2-dibromo-1,1,2,2-tetrafluoroethane. kg m-2 s-1 - "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "tendency_of_X" means derivative of X with respect to time. "Emission" means emission from a primary source located anywhere within the atmosphere, including at the lower boundary (i.e. earth's surface). "Emission" is a process entirely distinct from "re-emission" which is used in some standard names. The chemical formula for hcc140a is CH3CCl3. The IUPAC name for hcc140a is 1,1,1-trichloro-ethane. + The phrase "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. The mass is the total mass of the molecules. The chemical formula for HCC140a, also called methyl chloroform, is CH3CCl3. The IUPAC name for HCC140a is 1,1,1-trichloroethane. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Emission" means emission from a primary source located anywhere within the atmosphere, including at the lower boundary (i.e. earth's surface). "Emission" is a process entirely distinct from "re-emission" which is used in some standard names. kg m-2 s-1 - "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "tendency_of_X" means derivative of X with respect to time. "Emission" means emission from a primary source located anywhere within the atmosphere, including at the lower boundary (i.e. earth's surface). 
"Emission" is a process entirely distinct from "re-emission" which is used in some standard names. The chemical formula for HCFC141b is CH3CCl2F. The IUPAC name for HCFC141b is 1,1-dichloro-1-fluoroethane. + The phrase "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Emission" means emission from a primary source located anywhere within the atmosphere, including at the lower boundary (i.e. earth's surface). "Emission" is a process entirely distinct from "re-emission" which is used in some standard names. The chemical formula for HCFC141b is CH3CCl2F. The IUPAC name for HCFC141b is 1,1-dichloro-1-fluoroethane. kg m-2 s-1 - "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "tendency_of_X" means derivative of X with respect to time. "Emission" means emission from a primary source located anywhere within the atmosphere, including at the lower boundary (i.e. earth's surface). "Emission" is a process entirely distinct from "re-emission" which is used in some standard names. The chemical formula for HCFC142b is CH3CClF2. The IUPAC name for HCFC142b is 1-chloro-1,1-difluoroethane. + The phrase "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Emission" means emission from a primary source located anywhere within the atmosphere, including at the lower boundary (i.e. earth's surface). "Emission" is a process entirely distinct from "re-emission" which is used in some standard names. The chemical formula for HCFC142b is CH3CClF2. The IUPAC name for HCFC142b is 1-chloro-1,1-difluoroethane. kg m-2 s-1 - "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "tendency_of_X" means derivative of X with respect to time. 
"Emission" means emission from a primary source located anywhere within the atmosphere, including at the lower boundary (i.e. earth's surface). "Emission" is a process entirely distinct from "re-emission" which is used in some standard names. The chemical formula for HCFC22 is CHClF2. The IUPAC name for HCFC22 is chloro-difluoro-methane. + The phrase "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. The mass is the total mass of the molecules. The chemical formula for HCFC22 is CHClF2. The IUPAC name for HCFC22 is chloro(difluoro)methane. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Emission" means emission from a primary source located anywhere within the atmosphere, including at the lower boundary (i.e. earth's surface). "Emission" is a process entirely distinct from "re-emission" which is used in some standard names. kg m-2 s-1 - "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "tendency_of_X" means derivative of X with respect to time. "Dry deposition"is the sum of turbulent deposition and gravitational settling. + The phrase "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. The mass is the total mass of the molecules. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Dry deposition" is the sum of turbulent deposition and gravitational settling. The chemical formula for hexachlorobiphenyl is C12H4Cl6. The structure of this species consists of two linked benzene rings, each of which is additionally bonded to three chlorine atoms. kg m-2 s-1 - "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. "Emission" means emission from a primary source located anywhere within the atmosphere, including at the lower boundary (i.e. earth's surface). "Emission" is a process entirely distinct from "re-emission" which is used in some standard names. + The phrase "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. 
The mass is the total mass of the molecules. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Emission" means emission from a primary source located anywhere within the atmosphere, including at the lower boundary (i.e. earth's surface). "Emission" is a process entirely distinct from "re-emission" which is used in some standard names. The chemical formula for hexachlorobiphenyl is C12H4Cl6. The structure of this species consists of two linked benzene rings, each of which is additionally bonded to three chlorine atoms. kg m-2 s-1 - "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. "Re-emission" refers to emission that is not from a primary source; it refers to emission of a species that has previously been deposited and accumulated in soils or water. "Re-emission" is a process entirely distinct from "emission" which is used in some standard names. + The phrase "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. The mass is the total mass of the molecules. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Re-emission" refers to emission that is not from a primary source; it refers to emission of a species that has previously been deposited and accumulated in soils or water. "Re-emission" is a process entirely distinct from "emission" which is used in some standard names. The chemical formula for hexachlorobiphenyl is C12H4Cl6. The structure of this species consists of two linked benzene rings, each of which is additionally bonded to three chlorine atoms. kg m-2 s-1 - "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. "Wet deposition" means deposition by precipitation. + The phrase "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. The mass is the total mass of the molecules. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Wet deposition" means deposition by precipitation. The chemical formula for hexachlorobiphenyl is C12H4Cl6. The structure of this species consists of two linked benzene rings, each of which is additionally bonded to three chlorine atoms. kg m-2 s-1 - "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. 
The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "tendency_of_X" means derivative of X with respect to time. "Dry deposition" is the sum of turbulent deposition and gravitational settling. The chemical formula for hydrogen cyanide is HCN. + The phrase "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. The mass is the total mass of the molecules. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Dry deposition" is the sum of turbulent deposition and gravitational settling. The chemical formula for hydrogen cyanide is HCN. kg m-2 s-1 - "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "tendency_of_X" means derivative of X with respect to time. "Emission" means emission from a primary source located anywhere within the atmosphere, including at the lower boundary (i.e. earth's surface). "Emission" is a process entirely distinct from "re-emission" which is used in some standard names. The chemical formula for hydrogen cyanide is HCN. + The phrase "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. The mass is the total mass of the molecules. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Emission" means emission from a primary source located anywhere within the atmosphere, including at the lower boundary (i.e. earth's surface). "Emission" is a process entirely distinct from "re-emission" which is used in some standard names. The chemical formula for hydrogen cyanide is HCN. kg m-2 s-1 - "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "tendency_of_X" means derivative of X with respect to time. 
"Dry deposition" is the sum of turbulent deposition and gravitational settling. The chemical formula for hydrogen peroxide is H2O2. + The phrase "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. The mass is the total mass of the molecules. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Dry deposition" is the sum of turbulent deposition and gravitational settling. The chemical formula for hydrogen peroxide is H2O2. kg m-2 s-1 - "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. The mass is the total mass of the molecules. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Wet deposition" means deposition by precipitation. "tendency_of_X" means derivative of X with respect to time. The chemical formula for hydrogen peroxide is H2O2. + The phrase "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. The mass is the total mass of the molecules. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Wet deposition" means deposition by precipitation. The chemical formula for hydrogen peroxide is H2O2. @@ -24323,21 +27277,21 @@ final_air_pressure_of_lifted_parcel should be specified to indicate the specific kg m-2 s-1 - "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "tendency_of_X" means derivative of X with respect to time. "Emission" means emission from a primary source located anywhere within the atmosphere, including at the lower boundary (i.e. earth's surface). "Emission" is a process entirely distinct from "re-emission" which is used in some standard names. The chemical formula for isoprene is CH2=C(CH3)CH=CH2. The IUPAC name for isoprene is 2-methyl-buta-1,3-diene. Isoprene is a member of the group of hydrocarbons known as terpenes. There are standard names for the terpene group as well as for some of the individual species. 
+ The phrase "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. The chemical formula for isoprene is CH2=C(CH3)CH=CH2. The IUPAC name for isoprene is 2-methylbuta-1,3-diene. Isoprene is a member of the group of hydrocarbons known as terpenes. There are standard names for the terpene group as well as for some of the individual species. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Emission" means emission from a primary source located anywhere within the atmosphere, including at the lower boundary (i.e. earth's surface). "Emission" is a process entirely distinct from "re-emission" which is used in some standard names. kg m-2 s-1 - "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. The mass is the total mass of the molecules. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Emission" means emission from a primary source located anywhere within the atmosphere, including at the lower boundary (i.e. earth's surface). "Emission" is a process entirely distinct from "re-emission" which is used in some standard names. The chemical formula for isoprene is CH2=C(CH3)CH=CH2. The IUPAC name for isoprene is 2-methyl-buta-1,3-diene. Isoprene is a member of the group of hydrocarbons known as terpenes. There are standard names for the terpene group as well as for some of the individual species. The "forest fires" sector comprises the burning (natural and human-induced) of living or dead vegetation in forests. "Forest fires" is the term used in standard names to describe a collection of emission sources. A variable which has this value for the standard_name attribute should be accompanied by a comment attribute which lists the source categories and provides a reference to the categorization scheme, for example, "IPCC (Intergovernmental Panel on Climate Change) source category 5 as defined in the 2006 IPCC guidelines for national greenhouse gas inventories". + The phrase "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. The mass is the total mass of the molecules. The chemical formula for isoprene is CH2=C(CH3)CH=CH2. The IUPAC name for isoprene is 2-methylbuta-1,3-diene. Isoprene is a member of the group of hydrocarbons known as terpenes. There are standard names for the terpene group as well as for some of the individual species. 
The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Emission" means emission from a primary source located anywhere within the atmosphere, including at the lower boundary (i.e. earth's surface). "Emission" is a process entirely distinct from "re-emission" which is used in some standard names. The "forest fires" sector comprises the burning (natural and human-induced) of living or dead vegetation in forests. "Forest fires" is the term used in standard names to describe a collection of emission sources. A variable which has this value for the standard_name attribute should be accompanied by a comment attribute which lists the source categories and provides a reference to the categorization scheme, for example, "IPCC (Intergovernmental Panel on Climate Change) source category 5 as defined in the 2006 IPCC guidelines for national greenhouse gas inventories". kg m-2 s-1 - "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. The mass is the total mass of the molecules. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Emission" means emission from a primary source located anywhere within the atmosphere, including at the lower boundary (i.e. earth's surface). "Emission" is a process entirely distinct from "re-emission" which is used in some standard names. The chemical formula for isoprene is CH2=C(CH3)CH=CH2. The IUPAC name for isoprene is 2-methyl-buta-1,3-diene. Isoprene is a member of the group of hydrocarbons known as terpenes. There are standard names for the terpene group as well as for some of the individual species. The "savanna and grassland fires" sector comprises the burning (natural and human-induced) of living or dead vegetation in non-forested areas. It excludes field burning of agricultural residues. "Savanna and grassland fires" is the term used in standard names to describe a collection of emission sources. A variable which has this value for the standard_name attribute should be accompanied by a comment attribute which lists the source categories and provides a reference to the categorization scheme, for example, "IPCC (Intergovernmental Panel on Climate Change) source category 5 as defined in the 2006 IPCC guidelines for national greenhouse gas inventories". + The phrase "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. The mass is the total mass of the molecules. The chemical formula for isoprene is CH2=C(CH3)CH=CH2. The IUPAC name for isoprene is 2-methylbuta-1,3-diene. Isoprene is a member of the group of hydrocarbons known as terpenes. There are standard names for the terpene group as well as for some of the individual species. 
The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Emission" means emission from a primary source located anywhere within the atmosphere, including at the lower boundary (i.e. earth's surface). "Emission" is a process entirely distinct from "re-emission" which is used in some standard names. The "savanna and grassland fires" sector comprises the burning (natural and human-induced) of living or dead vegetation in non-forested areas. It excludes field burning of agricultural residues. "Savanna and grassland fires" is the term used in standard names to describe a collection of emission sources. A variable which has this value for the standard_name attribute should be accompanied by a comment attribute which lists the source categories and provides a reference to the categorization scheme, for example, "IPCC (Intergovernmental Panel on Climate Change) source category 5 as defined in the 2006 IPCC guidelines for national greenhouse gas inventories". @@ -24414,21 +27368,21 @@ final_air_pressure_of_lifted_parcel should be specified to indicate the specific kg m-2 s-1 - "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "tendency_of_X" means derivative of X with respect to time. "Emission" means emission from a primary source located anywhere within the atmosphere, including at the lower boundary (i.e. earth's surface). "Emission" is a process entirely distinct from "re-emission" which is used in some standard names. The chemical formula for limonene is C10H16. The IUPAC name for limonene is 1-methyl-4-prop-1-en-2-yl-cyclohexene. Limonene is a member of the group of hydrocarbons known as terpenes. There are standard names for the terpene group as well as for some of the individual species. + The phrase "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. The chemical formula for limonene is C10H16. The IUPAC name for limonene is 1-methyl-4-prop-1-en-2-ylcyclohexene. Limonene is a member of the group of hydrocarbons known as terpenes. There are standard names for the terpene group as well as for some of the individual species. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Emission" means emission from a primary source located anywhere within the atmosphere, including at the lower boundary (i.e. earth's surface). "Emission" is a process entirely distinct from "re-emission" which is used in some standard names. - + kg m-2 s-1 - "Content" indicates a quantity per unit area. 
The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. The mass is the total mass of the molecules. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. Aerosol particles take up ambient water (a process known as hygroscopic growth) depending on the relative humidity and the composition of the particles. "Dry aerosol particles" means aerosol particles without any water uptake. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Emission" means emission from a primary source located anywhere within the atmosphere, including at the lower boundary (i.e. the surface of the earth). "Emission" is a process entirely distinct from "re-emission" which is used in some standard names. "tendency_of_X" means derivative of X with respect to time. + "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. The mass is the total mass of the particles. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. Aerosol particles take up ambient water (a process known as hygroscopic growth) depending on the relative humidity and the composition of the particles. "Dry aerosol particles" means aerosol particles without any water uptake. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Dry deposition" is the sum of turbulent deposition and gravitational settling. "tendency_of_X" means derivative of X with respect to time. - + kg m-2 s-1 - "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. The mass is the total mass of the particles. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. Aerosol particles take up ambient water (a process known as hygroscopic growth) depending on the relative humidity and the composition of the particles. "Dry aerosol particles" means aerosol particles without any water uptake. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Dry deposition" is the sum of turbulent deposition and gravitational settling. "tendency_of_X" means derivative of X with respect to time. + The phrase "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. 
For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. The mass is the total mass of the molecules. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. Aerosol particles take up ambient water (a process known as hygroscopic growth) depending on the relative humidity and the composition of the particles. "Dry aerosol particles" means aerosol particles without any water uptake. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Emission" means emission from a primary source located anywhere within the atmosphere, including at the lower boundary (i.e. the surface of the earth). "Emission" is a process entirely distinct from "re-emission" which is used in some standard names. @@ -24659,7 +27613,7 @@ final_air_pressure_of_lifted_parcel should be specified to indicate the specific kg m-2 s-1 - "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. "tendency_of_X" means derivative of X with respect to time. The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "wet_deposition" means deposition by precipitation. "Nitrogen compounds" summarizes all chemical species containing nitrogen atoms. Usually, particle bound and gaseous nitrogen compounds, such as atomic nitrogen (N), nitrogen monoxide (NO), nitrogen dioxide (NO2), dinitrogen pentoxide (N2O5), nitric acid (HNO3), nitrate (NO3-), peroxynitric acid (HNO4), ammon1a (NH3), ammonium (NH4+), bromine nitrate (BrONO2), chlorine nitrate (ClONO2) and organic nitrates (most notably peroxyacetyl nitrate, sometimes referred to as PAN, (CH3COO2NO2)) are included. The list of individual species that are included in this quantity can vary between models. Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute. + The phrase "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. 
The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Wet deposition" means deposition by precipitation. "Nitrogen compounds" summarizes all chemical species containing nitrogen atoms. Usually, particle bound and gaseous nitrogen compounds, such as atomic nitrogen (N), nitrogen monoxide (NO), nitrogen dioxide (NO2), dinitrogen pentoxide (N2O5), nitric acid (HNO3), nitrate (NO3-), peroxynitric acid (HNO4), ammonia (NH3), ammonium (NH4+), bromine nitrate (BrONO2), chlorine nitrate (ClONO2) and organic nitrates (most notably peroxyacetyl nitrate, sometimes referred to as PAN, (CH3COO2NO2)) are included. The list of individual species that are included in this quantity can vary between models. Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute. @@ -25712,13 +28666,6 @@ final_air_pressure_of_lifted_parcel should be specified to indicate the specific "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. The mass is the total mass of the molecules. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Emission" means emission from a primary source located anywhere within the atmosphere, including at the lower boundary (i.e. the surface of the earth). "Emission" is a process entirely distinct from "re-emission" which is used in some standard names. "tendency_of_X" means derivative of X with respect to time. Terpenes are hydrocarbons, that is, they contain only hydrogen and carbon combined in the general proportions (C5H8)n where n is an integer greater than or equal to one. Sesquiterpenes are a class of terpenes that consist of three isoprene units and have the molecular formula C15H24. Terpenes are hydrocarbons. The term "sesquiterpenes" is used in standard names to describe the group of chemical species having this common structure that are represented within a given model. The list of individual species that are included in a quantity having a group chemical standard name can vary between models. Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute. - - kg m-2 s-1 - - - "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. 
Aerosol particles take up ambient water (a process known as hygroscopic growth) depending on the relative humidity and the composition of the particles. "Dry aerosol particles" means aerosol particles without any water uptake. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Wet deposition" means deposition by precipitation. "tendency_of_X" means derivative of X with respect to time. The chemical formula for the sulfate anion is SO4(2-). - - kg m-2 s-1 @@ -25775,6 +28722,13 @@ final_air_pressure_of_lifted_parcel should be specified to indicate the specific "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. Aerosol particles take up ambient water (a process known as hygroscopic growth) depending on the relative humidity and the composition of the particles. "Dry aerosol particles" means aerosol particles without any water uptake. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. The sum of turbulent deposition and gravitational settling is dry deposition. "tendency_of_X" means derivative of X with respect to time. The chemical formula for the sulfate anion is SO4(2-). + + kg m-2 s-1 + + + The phrase "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. Aerosol particles take up ambient water (a process known as hygroscopic growth) depending on the relative humidity and the composition of the particles. "Dry aerosol particles" means aerosol particles without any water uptake. The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Wet deposition" means deposition by precipitation. The chemical formula for the sulfate anion is SO4(2-). 
+ + kg m-2 s-1 @@ -25877,84 +28831,84 @@ final_air_pressure_of_lifted_parcel should be specified to indicate the specific kg m-2 s-1 - "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "tendency_of_X" means derivative of X with respect to time. "Emission" means emission from a primary source located anywhere within the atmosphere, including at the lower boundary (i.e. earth's surface). "Emission" is a process entirely distinct from "re-emission" which is used in some standard names. The chemical formula for toluene is C6H5CH3. Toluene has the same structure as benzene, except that one of the hydrogen atoms is replaced by a methyl group. The systematic name for toluene is methylbenzene. + "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. "Emission" means emission from a primary source located anywhere within the atmosphere, including at the lower boundary (i.e. earth's surface). The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Emission" is a process entirely distinct from "re-emission" which is used in some standard names. The chemical formula for toluene is C6H5CH3. Toluene has the same structure as benzene, except that one of the hydrogen atoms is replaced by a methyl group. The IUPAC name for toluene is methylbenzene. kg m-2 s-1 - "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. The mass is the total mass of the molecules. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Emission" means emission from a primary source located anywhere within the atmosphere, including at the lower boundary (i.e. earth's surface). "Emission" is a process entirely distinct from "re-emission" which is used in some standard names. The chemical formula for toluene is C6H5CH3. Toluene has the same structure as benzene, except that one of the hydrogen atoms is replaced by a methyl group. The systematic name for toluene is methylbenzene. The "agricultural production" sector comprises the agricultural processes of enteric fermentation, manure management, rice cultivation, agricultural soils and other. It may also include any not-classified or "other" combustion, which is commonly included in agriculture-related inventory data. 
"Agricultural production" is the term used in standard names to describe a collection of emission sources. A variable which has this value for the standard_name attribute should be accompanied by a comment attribute which lists the source categories and provides a reference to the categorization scheme, for example, "IPCC (Intergovernmental Panel on Climate Change) source categories 4A, 4B, 4C, 4D and 4G as defined in the 2006 IPCC guidelines for national greenhouse gas inventories". + The phrase "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. The mass is the total mass of the molecules. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Emission" means emission from a primary source located anywhere within the atmosphere, including at the lower boundary (i.e. earth's surface). "Emission" is a process entirely distinct from "re-emission" which is used in some standard names. The chemical formula for toluene is C6H5CH3. Toluene has the same structure as benzene, except that one of the hydrogen atoms is replaced by a methyl group. The IUPAC name for toluene is methylbenzene. The "agricultural production" sector comprises the agricultural processes of enteric fermentation, manure management, rice cultivation, agricultural soils and other. It may also include any not-classified or "other" combustion, which is commonly included in agriculture-related inventory data. "Agricultural production" is the term used in standard names to describe a collection of emission sources. A variable which has this value for the standard_name attribute should be accompanied by a comment attribute which lists the source categories and provides a reference to the categorization scheme, for example, "IPCC (Intergovernmental Panel on Climate Change) source categories 4A, 4B, 4C, 4D and 4G as defined in the 2006 IPCC guidelines for national greenhouse gas inventories". kg m-2 s-1 - "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. The mass is the total mass of the molecules. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Emission" means emission from a primary source located anywhere within the atmosphere, including at the lower boundary (i.e. earth's surface). "Emission" is a process entirely distinct from "re-emission" which is used in some standard names. The chemical formula for toluene is C6H5CH3. Toluene has the same structure as benzene, except that one of the hydrogen atoms is replaced by a methyl group. The systematic name for toluene is methylbenzene. The "agricultural waste burning" sector comprises field burning of agricultural residues. 
"Agricultural waste burning" is the term used in standard names to describe a collection of emission sources. A variable which has this value for the standard_name attribute should be accompanied by a comment attribute which lists the source categories and provides a reference to the categorization scheme, for example, "IPCC (Intergovernmental Panel on Climate Change) source category 4F as defined in the 2006 IPCC guidelines for national greenhouse gas inventories". + The phrase "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. The mass is the total mass of the molecules. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Emission" means emission from a primary source located anywhere within the atmosphere, including at the lower boundary (i.e. earth's surface). "Emission" is a process entirely distinct from "re-emission" which is used in some standard names. The chemical formula for toluene is C6H5CH3. Toluene has the same structure as benzene, except that one of the hydrogen atoms is replaced by a methyl group. The IUPAC name for toluene is methylbenzene. The "agricultural waste burning" sector comprises field burning of agricultural residues. "Agricultural waste burning" is the term used in standard names to describe a collection of emission sources. A variable which has this value for the standard_name attribute should be accompanied by a comment attribute which lists the source categories and provides a reference to the categorization scheme, for example, "IPCC (Intergovernmental Panel on Climate Change) source category 4F as defined in the 2006 IPCC guidelines for national greenhouse gas inventories". kg m-2 s-1 - "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. The mass is the total mass of the molecules. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Emission" means emission from a primary source located anywhere within the atmosphere, including at the lower boundary (i.e. earth's surface). "Emission" is a process entirely distinct from "re-emission" which is used in some standard names. The chemical formula for toluene is C6H5CH3. Toluene has the same structure as benzene, except that one of the hydrogen atoms is replaced by a methyl group. The systematic name for toluene is methylbenzene. The "energy production and distribution" sector comprises fuel combustion activities related to energy industries and fugitive emissions from fuels. It may also include any not-classified or "other" combustion, which is commonly included in energy-related inventory data. "Energy production and distribution" is the term used in standard names to describe a collection of emission sources. 
A variable which has this value for the standard_name attribute should be accompanied by a comment attribute which lists the source categories and provides a reference to the categorization scheme, for example, "IPCC (Intergovernmental Panel on Climate Change) source categories 1A1 and 1B as defined in the 2006 IPCC guidelines for national greenhouse gas inventories". + The phrase "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. The mass is the total mass of the molecules. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Emission" means emission from a primary source located anywhere within the atmosphere, including at the lower boundary (i.e. earth's surface). "Emission" is a process entirely distinct from "re-emission" which is used in some standard names. The chemical formula for toluene is C6H5CH3. Toluene has the same structure as benzene, except that one of the hydrogen atoms is replaced by a methyl group. The IUPAC name for toluene is methylbenzene. The "energy production and distribution" sector comprises fuel combustion activities related to energy industries and fugitive emissions from fuels. It may also include any not-classified or "other" combustion, which is commonly included in energy-related inventory data. "Energy production and distribution" is the term used in standard names to describe a collection of emission sources. A variable which has this value for the standard_name attribute should be accompanied by a comment attribute which lists the source categories and provides a reference to the categorization scheme, for example, "IPCC (Intergovernmental Panel on Climate Change) source categories 1A1 and 1B as defined in the 2006 IPCC guidelines for national greenhouse gas inventories". kg m-2 s-1 - "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. The mass is the total mass of the molecules. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Emission" means emission from a primary source located anywhere within the atmosphere, including at the lower boundary (i.e. earth's surface). "Emission" is a process entirely distinct from "re-emission" which is used in some standard names. The chemical formula for toluene is C6H5CH3. Toluene has the same structure as benzene, except that one of the hydrogen atoms is replaced by a methyl group. The systematic name for toluene is methylbenzene. The "forest fires" sector comprises the burning (natural and human-induced) of living or dead vegetation in forests. "Forest fires" is the term used in standard names to describe a collection of emission sources. 
A variable which has this value for the standard_name attribute should be accompanied by a comment attribute which lists the source categories and provides a reference to the categorization scheme, for example, "IPCC (Intergovernmental Panel on Climate Change) source category 5 as defined in the 2006 IPCC guidelines for national greenhouse gas Inventories". + The phrase "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. The mass is the total mass of the molecules. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Emission" means emission from a primary source located anywhere within the atmosphere, including at the lower boundary (i.e. earth's surface). "Emission" is a process entirely distinct from "re-emission" which is used in some standard names. The chemical formula for toluene is C6H5CH3. Toluene has the same structure as benzene, except that one of the hydrogen atoms is replaced by a methyl group. The IUPAC name for toluene is methylbenzene. The "forest fires" sector comprises the burning (natural and human-induced) of living or dead vegetation in forests. "Forest fires" is the term used in standard names to describe a collection of emission sources. A variable which has this value for the standard_name attribute should be accompanied by a comment attribute which lists the source categories and provides a reference to the categorization scheme, for example, "IPCC (Intergovernmental Panel on Climate Change) source category 5 as defined in the 2006 IPCC guidelines for national greenhouse gas inventories". kg m-2 s-1 - "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. The mass is the total mass of the molecules. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Emission" means emission from a primary source located anywhere within the atmosphere, including at the lower boundary (i.e. earth's surface). "Emission" is a process entirely distinct from "re-emission" which is used in some standard names. The chemical formula for toluene is C6H5CH3. Toluene has the same structure as benzene, except that one of the hydrogen atoms is replaced by a methyl group. The systematic name for toluene is methylbenzene. The "industrial processes and combustion" sector comprises fuel combustion activities related to manufacturing industries and construction, industrial processes related to mineral products, the chemical industry, metal production, the production of pulp, paper, food and drink, and non-energy industry use of lubricants and waxes. It may also include any not-classified or "other" combustion, which is commonly included in industry-related inventory data. 
"Industrial processes and combustion" is the term used in standard names to describe a collection of emission sources. A variable which has this value for the standard_name attribute should be accompanied by a comment attribute which lists the source categories and provides a reference to the categorization scheme, for example, "IPCC (Intergovernmental Panel on Climate Change) source categories 1A2, 2A, 2B, 2C, 2D and 2G as defined in the 2006 IPCC guidelines for national greenhouse gas inventories". + The phrase "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. The mass is the total mass of the molecules. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Emission" means emission from a primary source located anywhere within the atmosphere, including at the lower boundary (i.e. earth's surface). "Emission" is a process entirely distinct from "re-emission" which is used in some standard names. The chemical formula for toluene is C6H5CH3. Toluene has the same structure as benzene, except that one of the hydrogen atoms is replaced by a methyl group. The IUPAC name for toluene is methylbenzene. The "industrial processes and combustion" sector comprises fuel combustion activities related to manufacturing industries and construction, industrial processes related to mineral products, the chemical industry, metal production, the production of pulp, paper, food and drink, and non-energy industry use of lubricants and waxes. It may also include any not-classified or "other" combustion, which is commonly included in industry-related inventory data. "Industrial processes and combustion" is the term used in standard names to describe a collection of emission sources. A variable which has this value for the standard_name attribute should be accompanied by a comment attribute which lists the source categories and provides a reference to the categorization scheme, for example, "IPCC (Intergovernmental Panel on Climate Change) source categories 1A2, 2A, 2B, 2C, 2D and 2G as defined in the 2006 IPCC guidelines for national greenhouse gas inventories". kg m-2 s-1 - "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. The mass is the total mass of the molecules. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Emission" means emission from a primary source located anywhere within the atmosphere, including at the lower boundary (i.e. earth's surface). "Emission" is a process entirely distinct from "re-emission" which is used in some standard names. The chemical formula for toluene is C6H5CH3. Toluene has the same structure as benzene, except that one of the hydrogen atoms is replaced by a methyl group. 
The systematic name for toluene is methylbenzene. The "land transport" sector includes fuel combustion activities related to road transportation, railways and other transportation. "Land transport" is the term used in standard names to describe a collection of emission sources. A variable which has this value for the standard_name attribute should be accompanied by a comment attribute which lists the source categories and provides a reference to the categorization scheme, for example, "IPCC (Intergovernmental Panel on Climate Change) source categories 1A3b, 1A3c and 1A3e as defined in the 2006 IPCC guidelines for national greenhouse gas inventories". + The phrase "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. The mass is the total mass of the molecules. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Emission" means emission from a primary source located anywhere within the atmosphere, including at the lower boundary (i.e. earth's surface). "Emission" is a process entirely distinct from "re-emission" which is used in some standard names. The chemical formula for toluene is C6H5CH3. Toluene has the same structure as benzene, except that one of the hydrogen atoms is replaced by a methyl group. The IUPAC name for toluene is methylbenzene. The "land transport" sector includes fuel combustion activities related to road transportation, railways and other transportation. "Land transport" is the term used in standard names to describe a collection of emission sources. A variable which has this value for the standard_name attribute should be accompanied by a comment attribute which lists the source categories and provides a reference to the categorization scheme, for example, "IPCC (Intergovernmental Panel on Climate Change) source categories 1A3b, 1A3c and 1A3e as defined in the 2006 IPCC guidelines for national greenhouse gas inventories". kg m-2 s-1 - "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. The mass is the total mass of the molecules. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Emission" means emission from a primary source located anywhere within the atmosphere, including at the lower boundary (i.e. earth's surface). "Emission" is a process entirely distinct from "re-emission" which is used in some standard names. The chemical formula for toluene is C6H5CH3. Toluene has the same structure as benzene, except that one of the hydrogen atoms is replaced by a methyl group. The systematic name for toluene is methylbenzene. The "maritime transport" sector includes fuel combustion activities related to maritime transport. 
"Maritime transport" is the term used in standard names to describe a collection of emission sources. A variable which has this value for the standard_name attribute should be accompanied by a comment attribute which lists the source categories and provides a reference to the categorization scheme, for example, "IPCC (Intergovernmental Panel on Climate Change) source category 1A3d as defined in the 2006 IPCC guidelines for national greenhouse gas Inventories". + The phrase "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. The mass is the total mass of the molecules. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Emission" means emission from a primary source located anywhere within the atmosphere, including at the lower boundary (i.e. earth's surface). "Emission" is a process entirely distinct from "re-emission" which is used in some standard names. The chemical formula for toluene is C6H5CH3. Toluene has the same structure as benzene, except that one of the hydrogen atoms is replaced by a methyl group. The IUPAC name for toluene is methylbenzene. The "maritime transport" sector includes fuel combustion activities related to maritime transport. "Maritime transport" is the term used in standard names to describe a collection of emission sources. A variable which has this value for the standard_name attribute should be accompanied by a comment attribute which lists the source categories and provides a reference to the categorization scheme, for example, "IPCC (Intergovernmental Panel on Climate Change) source category 1A3d as defined in the 2006 IPCC guidelines for national greenhouse gas Inventories". kg m-2 s-1 - "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. The mass is the total mass of the molecules. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Emission" means emission from a primary source located anywhere within the atmosphere, including at the lower boundary (i.e. earth's surface). "Emission" is a process entirely distinct from "re-emission" which is used in some standard names. The chemical formula for toluene is C6H5CH3. Toluene has the same structure as benzene, except that one of the hydrogen atoms is replaced by a methyl group. The systematic name for toluene is methylbenzene. The "residential and commercial combustion" sector comprises fuel combustion activities related to the commercial/institutional sector, the residential sector and the agriculture/forestry/fishing sector. It may also include any not-classified or "other" combustion, which is commonly included in the inventory data. 
"Residential and commercial combustion" is the term used in standard names to describe a collection of emission sources. A variable which has this value for the standard_name attribute should be accompanied by a comment attribute which lists the source categories and provides a reference to the categorization scheme, for example, "IPCC (Intergovernmental Panel on Climate Change) source categories 1A4a, 1A4b and 1A4c as defined in the 2006 IPCC guidelines for national greenhouse gas inventories". + The phrase "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. The mass is the total mass of the molecules. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Emission" means emission from a primary source located anywhere within the atmosphere, including at the lower boundary (i.e. earth's surface). "Emission" is a process entirely distinct from "re-emission" which is used in some standard names. The chemical formula for toluene is C6H5CH3. Toluene has the same structure as benzene, except that one of the hydrogen atoms is replaced by a methyl group. The IUPAC name for toluene is methylbenzene. The "residential and commercial combustion" sector comprises fuel combustion activities related to the commercial/institutional sector, the residential sector and the agriculture/forestry/fishing sector. It may also include any not-classified or "other" combustion, which is commonly included in the inventory data. "Residential and commercial combustion" is the term used in standard names to describe a collection of emission sources. A variable which has this value for the standard_name attribute should be accompanied by a comment attribute which lists the source categories and provides a reference to the categorization scheme, for example, "IPCC (Intergovernmental Panel on Climate Change) source categories 1A4a, 1A4b and 1A4c as defined in the 2006 IPCC guidelines for national greenhouse gas inventories". kg m-2 s-1 - "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. The mass is the total mass of the molecules. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Emission" means emission from a primary source located anywhere within the atmosphere, including at the lower boundary (i.e. earth's surface). "Emission" is a process entirely distinct from "re-emission" which is used in some standard names. The chemical formula for toluene is C6H5CH3. Toluene has the same structure as benzene, except that one of the hydrogen atoms is replaced by a methyl group. The systematic name for toluene is methylbenzene. 
The "savanna and grassland fires" sector comprises the burning (natural and human-induced) of living or dead vegetation in non-forested areas. It excludes field burning of agricultural residues. "Savanna and grassland fires" is the term used in standard names to describe a collection of emission sources. A variable which has this value for the standard_name attribute should be accompanied by a comment attribute which lists the source categories and provides a reference to the categorization scheme, for example, "IPCC (Intergovernmental Panel on Climate Change) source category 5 as defined in the 2006 IPCC guidelines for national greenhouse gas Inventories". + The phrase "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. The mass is the total mass of the molecules. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Emission" means emission from a primary source located anywhere within the atmosphere, including at the lower boundary (i.e. earth's surface). "Emission" is a process entirely distinct from "re-emission" which is used in some standard names. The chemical formula for toluene is C6H5CH3. Toluene has the same structure as benzene, except that one of the hydrogen atoms is replaced by a methyl group. The IUPAC name for toluene is methylbenzene. The "savanna and grassland fires" sector comprises the burning (natural and human-induced) of living or dead vegetation in non-forested areas. It excludes field burning of agricultural residues. "Savanna and grassland fires" is the term used in standard names to describe a collection of emission sources. A variable which has this value for the standard_name attribute should be accompanied by a comment attribute which lists the source categories and provides a reference to the categorization scheme, for example, "IPCC (Intergovernmental Panel on Climate Change) source category 5 as defined in the 2006 IPCC guidelines for national greenhouse gas Inventories". kg m-2 s-1 - "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. The mass is the total mass of the molecules. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Emission" means emission from a primary source located anywhere within the atmosphere, including at the lower boundary (i.e. earth's surface). "Emission" is a process entirely distinct from "re-emission" which is used in some standard names. The chemical formula for toluene is C6H5CH3. Toluene has the same structure as benzene, except that one of the hydrogen atoms is replaced by a methyl group. The systematic name for toluene is methylbenzene. 
The "solvent production and use" sector comprises industrial processes related to the consumption of halocarbons, SF6, solvent and other product use. "Solvent production and use" is the term used in standard names to describe a collection of emission sources. A variable which has this value for the standard_name attribute should be accompanied by a comment attribute which lists the source categories and provides a reference to the categorization scheme, for example, "IPCC (Intergovernmental Panel on Climate Change) source categories 2F and 3 as defined in the 2006 IPCC guidelines for national greenhouse gas inventories". + The phrase "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. The mass is the total mass of the molecules. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Emission" means emission from a primary source located anywhere within the atmosphere, including at the lower boundary (i.e. earth's surface). "Emission" is a process entirely distinct from "re-emission" which is used in some standard names. The chemical formula for toluene is C6H5CH3. Toluene has the same structure as benzene, except that one of the hydrogen atoms is replaced by a methyl group. The IUPAC name for toluene is methylbenzene. The "solvent production and use" sector comprises industrial processes related to the consumption of halocarbons, SF6, solvent and other product use. "Solvent production and use" is the term used in standard names to describe a collection of emission sources. A variable which has this value for the standard_name attribute should be accompanied by a comment attribute which lists the source categories and provides a reference to the categorization scheme, for example, "IPCC (Intergovernmental Panel on Climate Change) source categories 2F and 3 as defined in the 2006 IPCC guidelines for national greenhouse gas inventories". kg m-2 s-1 - "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. The mass is the total mass of the molecules. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Emission" means emission from a primary source located anywhere within the atmosphere, including at the lower boundary (i.e. earth's surface). "Emission" is a process entirely distinct from "re-emission" which is used in some standard names. The chemical formula for toluene is C6H5CH3. Toluene has the same structure as benzene, except that one of the hydrogen atoms is replaced by a methyl group. The systematic name for toluene is methylbenzene. The "waste treatment and disposal" sector comprises solid waste disposal on land, wastewater handling, waste incineration and other waste disposal. 
"Waste treatment and disposal" is the term used in standard names to describe a collection of emission sources. A variable which has this value for the standard_name attribute should be accompanied by a comment attribute which lists the source categories and provides a reference to the categorization scheme, for example, "IPCC (Intergovernmental Panel on Climate Change) source categories 6A, 6B, 6C and 6D as defined in the 2006 IPCC guidelines for national greenhouse gas inventories". + The phrase "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. The mass is the total mass of the molecules. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Emission" means emission from a primary source located anywhere within the atmosphere, including at the lower boundary (i.e. earth's surface). "Emission" is a process entirely distinct from "re-emission" which is used in some standard names. The chemical formula for toluene is C6H5CH3. Toluene has the same structure as benzene, except that one of the hydrogen atoms is replaced by a methyl group. The IUPAC name for toluene is methylbenzene. The "waste treatment and disposal" sector comprises solid waste disposal on land, wastewater handling, waste incineration and other waste disposal. "Waste treatment and disposal" is the term used in standard names to describe a collection of emission sources. A variable which has this value for the standard_name attribute should be accompanied by a comment attribute which lists the source categories and provides a reference to the categorization scheme, for example, "IPCC (Intergovernmental Panel on Climate Change) source categories 6A, 6B, 6C and 6D as defined in the 2006 IPCC guidelines for national greenhouse gas inventories". @@ -26017,14 +28971,14 @@ final_air_pressure_of_lifted_parcel should be specified to indicate the specific kg m-2 s-1 - The phrase "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. Atmosphere water vapor content is sometimes referred to as "precipitable water", although this term does not imply the water could all be precipitated. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. Sublimation is the conversion of solid into vapor. The phrase "surface_snow" means snow lying on the surface. Unless indicated in the cell_methods attribute, a quantity is assumed to apply to the whole area of each horizontal grid box. + The phrase "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. 
For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. Atmosphere water vapor content is sometimes referred to as "precipitable water", although this term does not imply the water could all be precipitated. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. Sublimation is the conversion of solid into vapor. Surface snow refers to the snow on the solid ground or on surface ice cover, but excludes, for example, falling snowflakes and snow on plants. Unless indicated in the cell_methods attribute, a quantity is assumed to apply to the whole area of each horizontal grid box. kg m-2 s-1 - The phrase "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. Atmosphere water vapor content is sometimes referred to as "precipitable water", although this term does not imply the water could all be precipitated. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. Sublimation is the conversion of solid into vapor. The phrase "surface_snow" means snow lying on the surface. Unless indicated in the cell_methods attribute, a quantity is assumed to apply to the whole area of each horizontal grid box. + The phrase "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. Atmosphere water vapor content is sometimes referred to as "precipitable water", although this term does not imply the water could all be precipitated. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. Sublimation is the conversion of solid into vapor. Surface snow refers to the snow on the solid ground or on surface ice cover, but excludes, for example, falling snowflakes and snow on plants. Unless indicated in the cell_methods attribute, a quantity is assumed to apply to the whole area of each horizontal grid box. @@ -26133,14 +29087,14 @@ final_air_pressure_of_lifted_parcel should be specified to indicate the specific - mole m-3 s-1 + mol m-3 s-1 Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction "mole_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. 
"Chemical destruction" means the result of all chemical reactions within the medium (here, atmosphere) that remove a certain amount of a particular species from the medium. "tendency_of_X" means derivative of X with respect to time. The chemical formula of carbon monoxide is CO. - mole m-3 s-1 + mol m-3 s-1 Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction "mole_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Chemical destruction" means the result of all chemical reactions within the medium (here, atmosphere) that remove a certain amount of a particular species from the medium. "tendency_of_X" means derivative of X with respect to time. The chemical formula for methane is CH4. Methane is a member of the group of hydrocarbons known as alkanes. There are standard names for the alkane group as well as for some of the individual species. @@ -26154,14 +29108,14 @@ final_air_pressure_of_lifted_parcel should be specified to indicate the specific - mole m-3 s-1 + mol m-3 s-1 Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction "mole_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". "tendency_of_X" means derivative of X with respect to time. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Chemical destruction" means the result of all chemical reactions within the medium (here, atmosphere) that remove a certain amount of a particular species from the medium. The chemical formula for ozone is O3. The IUPAC name for ozone is trioxygen. - mole m-3 s-1 + mol m-3 s-1 Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction "mole_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Chemical production" means the result of all chemical reactions within the medium (here, atmosphere) that produce a certain amount of the particular species. "tendency_of_X" means derivative of X with respect to time. The chemical formula for ozone is O3. The IUPAC name for ozone is trioxygen. @@ -26276,7 +29230,7 @@ final_air_pressure_of_lifted_parcel should be specified to indicate the specific mol s-1 - "tendency_of_X" means derivative of X with respect to time. The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere, i.e. summed over the atmospheric column and over the entire globe. 
"Brox" describes a family of chemical species consisting of inorganic bromine compounds with the exception of hydrogen bromide (HBr) and bromine nitrate (BrONO2). "Brox" is the term used in standard names for all species belonging to the family that are represented within a given model. The list of individual species that are included in a quantity with a group chemical standard name can vary between models. Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute. "Inorganic bromine", sometimes referred to as Bry, describes a family of chemical species which result from the degradation of source gases containing bromine (halons, methyl bromide, VSLS) and natural inorganic bromine sources such as volcanoes, seasalt and other aerosols. Standard names that use the term "inorganic_bromine" are used for quantities that contain all inorganic bromine species including HCl and ClONO2. The phrase 'expressed_as' is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. + The phrase "tendency_of_X" means derivative of X with respect to time. The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere, i.e. summed over the atmospheric column and over the entire globe. "Brox" describes a family of chemical species consisting of inorganic bromine compounds with the exception of hydrogen bromide (HBr) and bromine nitrate (BrONO2). The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. "Brox" is the term used in standard names for all species belonging to the family that are represented within a given model. The list of individual species that are included in a quantity with a group chemical standard name can vary between models. Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute. "Inorganic bromine", sometimes referred to as Bry, describes a family of chemical species which result from the degradation of source gases containing bromine (halons, methyl bromide, VSLS) and natural inorganic bromine sources such as volcanoes, sea salt and other aerosols. Standard names that use the term "inorganic_bromine" are used for quantities that contain all inorganic bromine species including HCl and ClONO2. @@ -26297,56 +29251,56 @@ final_air_pressure_of_lifted_parcel should be specified to indicate the specific mol s-1 - "tendency_of_X" means derivative of X with respect to time. The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere,i.e. summed over the atmospheric column and over the entire globe. The chemical formula ofcarbon monoxide is CO. + The phrase "tendency_of_X" means derivative of X with respect to time. The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere, i.e. summed over the atmospheric column and over the entire globe. The chemical formula of carbon monoxide is CO. mol s-1 - "tendency_of_X" means derivative of X with respect to time. 
The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere,i.e. summed over the atmospheric column and over the entire globe. The chemical formula ofcarbon tetrachloride is CCl4. + The phrase "tendency_of_X" means derivative of X with respect to time. The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere, i.e. summed over the atmospheric column and over the entire globe. The chemical formula of carbon tetrachloride is CCl4. The IUPAC name for carbon tetrachloride is tetrachloromethane. mol s-1 - "tendency_of_X" means derivative of X with respect to time. The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere,i.e. summed over the atmospheric column and over the entire globe. The chemical formula ofCFC11 is CFCl3. The IUPAC name fof CFC11 is trichloro-fluoro-methane. + The phrase "tendency_of_X" means derivative of X with respect to time. The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere, i.e. summed over the atmospheric column and over the entire globe. The chemical formula of CFC11 is CFCl3. The IUPAC name for CFC11 is trichloro(fluoro)methane. mol s-1 - "tendency_of_X" means derivative of X with respect to time. The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere, i.e. summed over the atmospheric column and over the entire globe. Thechemical formula of CFC113 is CCl2FCClF2. The IUPAC name for CFC113 is 1,1,2-trichloro-1,2,2-trifluoro-ethane. + The phrase "tendency_of_X" means derivative of X with respect to time. The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere, i.e. summed over the atmospheric column and over the entire globe. The chemical formula of CFC113 is CCl2FCClF2. The IUPAC name for CFC113 is 1,1,2-trichloro-1,2,2-trifluoroethane. mol s-1 - "tendency_of_X" means derivative of X with respect to time. The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere, i.e. summed over the atmospheric column and over the entire globe. The chemical formula of CFC113a CCl3CF3. The IUPAC name for CFC113a is 1,1,1-trichloro-2,2,2-trifluoro-ethane. + The phrase "tendency_of_X" means derivative of X with respect to time. The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere, i.e. summed over the atmospheric column and over the entire globe. The chemical formula of CFC113a is CCl3CF3. The IUPAC name for CFC113a is 1,1,1-trichloro-2,2,2-trifluoroethane. mol s-1 - "tendency_of_X" means derivative of X with respect to time. The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere,i.e. summed over the atmospheric column and over the entire globe. The chemical formula ofCFC114 is CClF2CClF2. The IUPAC name for CFC114 is 1,2-dichloro-1,1,2,2-tetrafluoro-ethane. + The phrase "tendency_of_X" means derivative of X with respect to time. The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere, i.e. summed over the atmospheric column and over the entire globe. The chemical formula of CFC114 is CClF2CClF2. The IUPAC name for CFC114 is 1,2-dichloro-1,1,2,2-tetrafluoroethane. mol s-1 - "tendency_of_X" means derivative of X with respect to time. 
The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere, i.e. summed over the atmospheric column and over the entire globe. Thechemical formula of CFC115 is CClF2CF3. The IUPAC name for CFC115 is 1-chloro-1,1,2,2,2-pentafluoro-ethane. + The phrase "tendency_of_X" means derivative of X with respect to time. The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere, i.e. summed over the atmospheric column and over the entire globe. The chemical formula of CFC115 is CClF2CF3. The IUPAC name for CFC115 is 1-chloro-1,1,2,2,2-pentafluoroethane. mol s-1 - "tendency_of_X" means derivative of X with respect to time. The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere, i.e. summed over the atmospheric column and over the entire globe. Thechemical formula for CFC12 is CF2Cl2. The IUPAC name for CFC12 is dichloro-difluoro-methane. + The phrase "tendency_of_X" means derivative of X with respect to time. The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere, i.e. summed over the atmospheric column and over the entire globe. The chemical formula for CFC12 is CF2Cl2. The IUPAC name for CFC12 is dichloro(difluoro)methane. @@ -26374,7 +29328,7 @@ final_air_pressure_of_lifted_parcel should be specified to indicate the specific mol s-1 - "tendency_of_X" means derivative of X with respect to time. The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere, i.e. summed over the atmospheric column and over the entire globe. "Clox" describes a family of chemical species consisting of inorganic chlorine compounds with the exception of hydrogen chloride (HCl) and chlorine nitrate (ClONO2). "Clox" is the term used in standard names for all species belonging to the family that are represented within a given model. The list of individual species that are included in a quantity with a group chemical standard name can vary between models. Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute. "Inorganic chlorine", sometimes referred to as Cly, describes a family of chemical species which result from the degradation of source gases containing chlorine (CFCs, HCFCs, VSLS) and natural inorganic chlorine sources such as seasalt and other aerosols. Standard names that use the term "inorganic_chlorine" are used for quantities that contain all inorganic chlorine species including HCl and ClONO2. The phrase 'expressed_as' is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. + The phrase "tendency_of_X" means derivative of X with respect to time. The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere, i.e. summed over the atmospheric column and over the entire globe. "Clox" describes a family of chemical species consisting of inorganic chlorine compounds with the exception of hydrogen chloride (HCl) and chlorine nitrate (ClONO2). The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. 
It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. "Clox" is the term used in standard names for all species belonging to the family that are represented within a given model. The list of individual species that are included in a quantity with a group chemical standard name can vary between models. Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute. "Inorganic chlorine", sometimes referred to as Cly, describes a family of chemical species which result from the degradation of source gases containing chlorine (CFCs, HCFCs, VSLS) and natural inorganic chlorine sources such as sea salt and other aerosols. Standard names that use the term "inorganic_chlorine" are used for quantities that contain all inorganic chlorine species including HCl and ClONO2. @@ -26458,35 +29412,35 @@ final_air_pressure_of_lifted_parcel should be specified to indicate the specific mol s-1 - "tendency_of_X" means derivative of X with respect to time. The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere, i.e. summed over the atmospheric column and over the entire globe. The chemical formula for halon1202 is CBr2F2. The IUPAC name for halon1202 is dibromo-difluoro-methane. + The phrase "tendency_of_X" means derivative of X with respect to time. The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere, i.e. summed over the atmospheric column and over the entire globe. The chemical formula for Halon1202 is CBr2F2. The IUPAC name for Halon1202 is dibromo(difluoro)methane. mol s-1 - "tendency_of_X" means derivative of X with respect to time. The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere, i.e. summed over the atmospheric column and over the entire globe. The chemical formula for halon1211 is CBrClF2. The IUPAC name for halon1211 is bromo-chloro-difluoro-methane. + The phrase "tendency_of_X" means derivative of X with respect to time. The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere, i.e. summed over the atmospheric column and over the entire globe. The chemical formula for Halon1211 is CBrClF2. The IUPAC name for Halon1211 is bromo-chloro-difluoromethane. mol s-1 - "tendency_of_X" means derivative of X with respect to time. The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere, i.e. summed over the atmospheric column and over the entire globe. The chemical formula for halon1301 is CBrF3. The IUPAC name for halon1301 is bromo-trifluoro-methane. + The phrase "tendency_of_X" means derivative of X with respect to time. The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere, i.e. summed over the atmospheric column and over the entire globe. The chemical formula for Halon1301 is CBrF3. The IUPAC name for Halon1301 is bromo(trifluoro)methane. mol s-1 - "tendency_of_X" means derivative of X with respect to time. The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere, i.e. summed over the atmospheric column and over the entire globe. The chemical formula for halo2402 is C2Br2F4. The IUPAC name for halon2402 is 1,2-dibromo-1,1,2,2-tetrafluoro-ethane. 
+ The phrase "tendency_of_X" means derivative of X with respect to time. The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere, i.e. summed over the atmospheric column and over the entire globe. The chemical formula for Halon2402 is C2Br2F4. The IUPAC name for Halon2402 is 1,2-dibromo-1,1,2,2-tetrafluoroethane. mol s-1 - "tendency_of_X" means derivative of X with respect to time. The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere, i.e. summed over the atmospheric column and over the entire globe. The chemical formula for hcc140a is CH3CCl3. The IUPAC name for hcc140a is 1,1,1-trichloro-ethane. + The phrase "tendency_of_X" means derivative of X with respect to time. The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere, i.e. summed over the atmospheric column and over the entire globe. The chemical formula for HCC140a, also called methyl chloroform, is CH3CCl3. The IUPAC name for HCC140a is 1,1,1-trichloroethane. @@ -26507,7 +29461,7 @@ final_air_pressure_of_lifted_parcel should be specified to indicate the specific mol s-1 - "tendency_of_X" means derivative of X with respect to time. The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere, i.e. summed over the atmospheric column and over the entire globe. The chemical formula for HCFC22 is CHClF2. The IUPAC name for HCFC22 is chloro-difluoro-methane. + The phrase "tendency_of_X" means derivative of X with respect to time. The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere, i.e. summed over the atmospheric column and over the entire globe. The chemical formula for HCFC22 is CHClF2. The IUPAC name for HCFC22 is chloro(difluoro)methane. @@ -26556,14 +29510,14 @@ final_air_pressure_of_lifted_parcel should be specified to indicate the specific mol s-1 - "tendency_of_X" means derivative of X with respect to time. The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere, i.e. summed over the atmospheric column and over the entire globe. The chemical formula for the hydroperoxyl radical is HO2. In chemistry, a 'radical' is a highly reactive, and therefore shortlived, species. + The phrase "tendency_of_X" means derivative of X with respect to time. The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere, i.e. summed over the atmospheric column and over the entire globe. The chemical formula for the hydroperoxyl radical is HO2. In chemistry, a "radical" is a highly reactive, and therefore short lived, species. mol s-1 - "tendency_of_X" means derivative of X with respect to time. The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere, i.e. summed over the atmospheric column and over the entire globe. The chemical formula for the hydroxyl radical is OH. In chemistry, a 'radical' is a highly reactive, and therefore shortlived, species. + The phrase "tendency_of_X" means derivative of X with respect to time. The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere, i.e. summed over the atmospheric column and over the entire globe. The chemical formula for the hydroxyl radical is OH. In chemistry, a "radical" is a highly reactive, and therefore short lived, species. 
@@ -26584,35 +29538,35 @@ final_air_pressure_of_lifted_parcel should be specified to indicate the specific mol s-1 - "tendency_of_X" means derivative of X with respect to time. The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere, i.e. summed over the atmospheric column and over the entire globe. "Inorganic bromine", sometimes referred to as Bry, describes a family of chemical species which result from the degradation of source gases containing bromine (halons, methyl bromide, VSLS) and natural inorganic bromine sources such as volcanoes, seasalt and other aerosols. "Inorganic bromine" is the term used in standard names for all species belonging to the family that are represented within a given model. The list of individual species that are included in a quantity having a group chemical standard name can vary between models. Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute. Standard names that use the term "brox" are used for quantities that contain all inorganic bromine species except HBr and BrONO2. + The phrase "tendency_of_X" means derivative of X with respect to time. The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere, i.e. summed over the atmospheric column and over the entire globe. "Inorganic bromine", sometimes referred to as Bry, describes a family of chemical species which result from the degradation of source gases containing bromine (halons, methyl bromide, VSLS) and natural inorganic bromine sources such as volcanoes, sea salt and other aerosols. "Inorganic bromine" is the term used in standard names for all species belonging to the family that are represented within a given model. The list of individual species that are included in a quantity having a group chemical standard name can vary between models. Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute. Standard names that use the term "brox" are used for quantities that contain all inorganic bromine species except HBr and BrONO2. mol s-1 - "tendency_of_X" means derivative of X with respect to time. The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere, i.e. summed over the atmospheric column and over the entire globe. "Inorganic chlorine", sometimes referred to as Cly, describes a family of chemical species which result from the degradation of source gases containing chlorine (CFCs, HCFCs, VSLS) and natural inorganic chlorine sources such as seasalt and other aerosols. "Inorganic chlorine" is the term used in standard names for all species belonging to the family that are represented within a given model. The list of individual species that are included in a quantity having a group chemical standard name can vary between models. Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute. Standard names that use the term "clox" are used for quantities that contain all inorganic chlorine species except HCl and ClONO2. + The phrase "tendency_of_X" means derivative of X with respect to time. The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere, i.e. summed over the atmospheric column and over the entire globe. 
"Inorganic chlorine", sometimes referred to as Cly, describes a family of chemical species which result from the degradation of source gases containing chlorine (CFCs, HCFCs, VSLS) and natural inorganic chlorine sources such as sea salt and other aerosols. "Inorganic chlorine" is the term used in standard names for all species belonging to the family that are represented within a given model. The list of individual species that are included in a quantity having a group chemical standard name can vary between models. Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute. Standard names that use the term "clox" are used for quantities that contain all inorganic chlorine species except HCl and ClONO2. mol s-1 - "tendency_of_X" means derivative of X with respect to time. The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere, i.e. summed over the atmospheric column and over the entire globe. The chemical formula for isoprene is CH2=C(CH3)CH=CH2. The IUPAC name for isoprene is 2-methyl-buta-1,3-diene. Isoprene is a member of the group of hydrocarbons known as terpenes. There are standard names for the terpene group as well as for some of the individual species. + The phrase "tendency_of_X" means derivative of X with respect to time. The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere, i.e. summed over the atmospheric column and over the entire globe. The chemical formula for isoprene is CH2=C(CH3)CH=CH2. The IUPAC name for isoprene is 2-methylbuta-1,3-diene. Isoprene is a member of the group of hydrocarbons known as terpenes. There are standard names for the terpene group as well as for some of the individual species. mol s-1 - "tendency_of_X" means derivative of X with respect to time. The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere, i.e. summed over the atmospheric column and over the entire globe. The chemical formula for limonene is C10H16. The IUPAC name for limonene is 1-methyl-4-prop-1-en-2-yl-cyclohexene. Limonene is a member of the group of hydrocarbons known as terpenes. There are standard names for the terpene group as well as for some of the individual species. + The phrase "tendency_of_X" means derivative of X with respect to time. The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere, i.e. summed over the atmospheric column and over the entire globe. The chemical formula for limonene is C10H16. The IUPAC name for limonene is 1-methyl-4-prop-1-en-2-ylcyclohexene. Limonene is a member of the group of hydrocarbons known as terpenes. There are standard names for the terpene group as well as for some of the individual species. mol s-1 - "tendency_of_X" means derivative of X with respect to time. The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere,i.e. summed over the atmospheric column and over the entire globe. The chemical formula for methane is CH4. Methane is a member of the group of hydrocarbons known as alkanes. Thereare standard names for the alkane group as well as for some of the individual species. + The phrase "tendency_of_X" means derivative of X with respect to time. The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere,i.e. 
summed over the atmospheric column and over the entire globe. The chemical formula for methane is CH4. Methane is a member of the group of hydrocarbons known as alkanes. There are standard names for the alkane group as well as for some of the individual species. @@ -26647,7 +29601,7 @@ final_air_pressure_of_lifted_parcel should be specified to indicate the specific mol s-1 - "tendency_of_X" means derivative of X with respect to time. The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere, i.e. summed over the atmospheric column and over the entire globe. The chemical formula for methyl_peroxy_radical is CH3O2. In chemistry, a 'radical' is a highly reactive, and therefore shortlived, species. + The phrase "tendency_of_X" means derivative of X with respect to time. The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere, i.e. summed over the atmospheric column and over the entire globe. The chemical formula for methyl_peroxy_radical is CH3O2. In chemistry, a "radical" is a highly reactive, and therefore short lived, species. @@ -26661,7 +29615,7 @@ final_air_pressure_of_lifted_parcel should be specified to indicate the specific mol s-1 - "tendency_of_X" means derivative of X with respect to time. The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere, i.e. summed over the atmospheric column and over the entire globe. The chemical formula for nitrate is NO3. In chemistry, a 'radical' is a highly reactive, and therefore shortlived, species. + The phrase "tendency_of_X" means derivative of X with respect to time. The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere, i.e. summed over the atmospheric column and over the entire globe. The chemical formula for nitrate is NO3. In chemistry, a "radical" is a highly reactive, and therefore short lived, species. @@ -26787,7 +29741,7 @@ final_air_pressure_of_lifted_parcel should be specified to indicate the specific mol s-1 - "tendency_of_X" means derivative of X with respect to time. The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere, i.e. summed over the atmospheric column and over the entire globe. The chemical formula for toluene is C6H5CH3. Toluene has the same structure as benzene, except that one of the hydrogen atoms is replaced by a methyl group. The systematic name for toluene is methylbenzene. + The phrase "tendency_of_X" means derivative of X with respect to time. The construction "atmosphere_moles_of_X" means the total number of moles of X in the entire atmosphere, i.e. summed over the atmospheric column and over the entire globe. The chemical formula for toluene is C6H5CH3. Toluene has the same structure as benzene, except that one of the hydrogen atoms is replaced by a methyl group. The IUPAC name for toluene is methylbenzene. @@ -26818,11 +29772,11 @@ final_air_pressure_of_lifted_parcel should be specified to indicate the specific "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. 
The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. The sum of turbulent deposition and gravitational settling is dry deposition. "tendency_of_X" means derivative of X with respect to time. - + m-2 s-1 - "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. The sum of turbulent deposition and gravitational settling is dry deposition. "tendency_of_X" means derivative of X with respect to time. + The phrase "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "atmosphere content" of a quantity refers to the vertical integral from the surface to the top of the atmosphere. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. The sum of turbulent deposition and gravitational settling is dry deposition. @@ -26843,7 +29797,7 @@ final_air_pressure_of_lifted_parcel should be specified to indicate the specific m s-1 - "tendency_of_X" means derivative of X with respect to time. Altitude is the (geometric) height above the geoid, which is the reference geopotential surface. The geoid is similar to mean sea level. "Bedrock" is the solid Earth surface beneath land ice or ocean water. + The phrase "tendency_of_X" means derivative of X with respect to time. Altitude is the (geometric) height above the geoid, which is the reference geopotential surface. The geoid is similar to mean sea level. "Bedrock" is the solid Earth surface beneath land ice, ocean water or soil. @@ -26972,6 +29926,13 @@ final_air_pressure_of_lifted_parcel should be specified to indicate the specific The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. "Layer" means any layer with upper and lower boundaries that have constant values in some vertical coordinate. There must be a vertical coordinate variable indicating the extent of the layer(s). If the layers are model layers, the vertical coordinate can be model_level_number, but it is recommended to specify a physical coordinate (in a scalar or auxiliary coordinate variable) as well. + + kg s-1 + + + The phrase "tendency_of_X" means derivative of X with respect to time.
"Land ice" means glaciers, ice-caps and ice-sheets resting on bedrock and also includes ice-shelves. The horizontal domain over which the quantity is calculated is described by the associated coordinate variables and coordinate bounds or by a coordinate variable or scalar coordinate variable with the standard name of "region" supplied according to section 6.1.1 of the CF conventions. + + kg s-1 @@ -27102,28 +30063,42 @@ final_air_pressure_of_lifted_parcel should be specified to indicate the specific s-1 - "tendency_of_X" means derivative of X with respect to time. Mass fraction is used in the construction mass_fraction_of_X_in_Y, where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). + The phrase "tendency_of_X" means derivative of X with respect to time. "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". "Cloud liquid water" refers to the liquid phase of cloud water. A diameter of 0.2 mm has been suggested as an upper limit to the size of drops that shall be regarded as cloud drops; larger drops fall rapidly enough so that only very strong updrafts can sustain them. Any such division is somewhat arbitrary, and active cumulus clouds sometimes contain cloud drops much larger than this. Reference: AMS Glossary http://glossary.ametsoc.org/wiki/Cloud_drop. s-1 - The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "tendency_of_X" means derivative of X with respect to time. Mass fraction is used in the construction mass_fraction_of_X_in_Y, where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). + The phrase "tendency_of_X" means derivative of X with respect to time. "Mass fraction" is used in the construction mass_fraction_of_X_in_Y, where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". "Cloud liquid water" refers to the liquid phase of cloud water. A diameter of 0.2 mm has been suggested as an upper limit to the size of drops that shall be regarded as cloud drops; larger drops fall rapidly enough so that only very strong updrafts can sustain them. Any such division is somewhat arbitrary, and active cumulus clouds sometimes contain cloud drops much larger than this. Reference: AMS Glossary http://glossary.ametsoc.org/wiki/Cloud_drop. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. s-1 - The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "tendency_of_X" means derivative of X with respect to time. Mass fraction is used in the construction mass_fraction_of_X_in_Y, where X is a material constituent of Y. 
It means the ratio of the mass of X to the mass of Y (including X). + The phrase "tendency_of_X" means derivative of X with respect to time. "Mass fraction" is used in the construction mass_fraction_of_X_in_Y, where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". "Cloud liquid water" refers to the liquid phase of cloud water. A diameter of 0.2 mm has been suggested as an upper limit to the size of drops that shall be regarded as cloud drops; larger drops fall rapidly enough so that only very strong updrafts can sustain them. Any such division is somewhat arbitrary, and active cumulus clouds sometimes contain cloud drops much larger than this. Reference: AMS Glossary http://glossary.ametsoc.org/wiki/Cloud_drop. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. + + + + s-1 + + + The phrase "tendency_of_X" means derivative of X with respect to time. "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". Convective cloud is that produced by the convection schemes in an atmosphere model. + + + + s-1 + + + The phrase "tendency_of_X" means derivative of X with respect to time. "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". Convective cloud is that produced by the convection schemes in an atmosphere model. "Cloud liquid water" refers to the liquid phase of cloud water. A diameter of 0.2 mm has been suggested as an upper limit to the size of drops that shall be regarded as cloud drops; larger drops fall rapidly enough so that only very strong updrafts can sustain them. Any such division is somewhat arbitrary, and active cumulus clouds sometimes contain cloud drops much larger than this. Reference: AMS Glossary http://glossary.ametsoc.org/wiki/Cloud_drop. s-1 - 'Mass fraction' is used in the construction mass_fraction_of_X_in_Y, where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical or biological species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. In an atmosphere model, stratiform cloud is that produced by large-scale convergence (not the convectionschemes). "tendency_of_X" means derivative of X with respect to time. "condensed_water" means liquid and ice. + The phrase "tendency_of_X" means derivative of X with respect to time. "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". 
In an atmosphere model, stratiform cloud is that produced by large-scale convergence (not the convection schemes). The phrase "condensed_water" means liquid and ice. @@ -27151,14 +30126,14 @@ final_air_pressure_of_lifted_parcel should be specified to indicate the specific s-1 - The phrase "tendency_of_X" means derivative of X with respect to time. Mass fraction is used in the construction mass_fraction_of_X_in_Y, where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". In an atmosphere model, stratiform cloud is that produced by large-scale convergence (not the convectionschemes). The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. The phrase "condensed_water" means liquid and ice. "Boundary layer mixing" means turbulent motions that transport heat, water, momentum and chemical constituents within the atmospheric boundary layer and affect exchanges between the surface and the atmosphere. The atmospheric boundary layer is typically characterised by a well-mixed sub-cloud layer of order 500 metres, and by a more extended conditionally unstable layer with boundary-layer clouds up to 2 km. (Reference: IPCC Third Assessment Report, Working Group 1: The Scientific Basis, 7.2.2.3, https://www.ipcc.ch/ipccreports/tar/wg1/273.htm). + The phrase "tendency_of_X" means derivative of X with respect to time. "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". In an atmosphere model, stratiform cloud is that produced by large-scale convergence (not the convection schemes). The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. The phrase "condensed_water" means liquid and ice. "Boundary layer mixing" means turbulent motions that transport heat, water, momentum and chemical constituents within the atmospheric boundary layer and affect exchanges between the surface and the atmosphere. The atmospheric boundary layer is typically characterised by a well-mixed sub-cloud layer of order 500 metres, and by a more extended conditionally unstable layer with boundary-layer clouds up to 2 km. (Reference: IPCC Third Assessment Report, Working Group 1: The Scientific Basis, 7.2.2.3, https://archive.ipcc.ch/ipccreports/tar/wg1/273.htm). s-1 - 'Mass fraction' is used in the construction mass_fraction_of_X_in_Y, where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical or biological species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. In an atmosphere model, stratiform cloud is that produced by large-scale convergence (not the convection schemes). 
The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. 'cloud_microphysics' is the sum of many cloud processes such as condensation, evaporation, homogeneous nucleation, heterogeneous nucleation, deposition, sublimation, the Bergeron-Findeisen process, riming, accretion, aggregationand icefall. The precise list of processes that are included in 'cloud_microphysics' canvary between models. Where possible, the data variable should be accompanied by a complete description of the processes included, for example, by using a comment attribute. Standard names also exist to describe the tendencies due to the separate processes. "tendency_of_X" means derivative of X with respect to time. "condensed_water" means liquid and ice. + The phrase "tendency_of_X" means derivative of X with respect to time. Mass fraction is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". In an atmosphere model, stratiform cloud is that produced by large-scale convergence (not the convection schemes). The phrase "condensed_water" means liquid and ice. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Cloud microphysics" is the sum of many cloud processes such as condensation, evaporation, homogeneous nucleation, heterogeneous nucleation, deposition, sublimation, the Bergeron-Findeisen process, riming, accretion, aggregation and icefall. The precise list of processes that are included in "cloud microphysics" can vary between models. Where possible, the data variable should be accompanied by a complete description of the processes included, for example, by using a comment attribute. Standard names also exist to describe the tendencies due to the separate processes. @@ -27179,7 +30154,7 @@ final_air_pressure_of_lifted_parcel should be specified to indicate the specific s-1 - 'Mass fraction' is used in the construction mass_fraction_of_X_in_Y, where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical or biological species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. In an atmosphere model, stratiform cloud is that produced by large-scale convergence (not the convectionschemes). "tendency_of_X" means derivative of X with respect to time. + The phrase "tendency_of_X" means derivative of X with respect to time. "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". In an atmosphere model, stratiform cloud is that produced by large-scale convergence (not the convection schemes). 
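In the same spirit, the mass-fraction construction that the surrounding entries define reads, in symbols (an illustrative sketch; m_X and m_Y are assumed names for the mass of constituent X and the total mass of Y, with Y including X):

\[
\mathrm{mass\_fraction\_of\_X\_in\_Y} \;=\; \frac{m_X}{m_Y},
\qquad 0 \le \frac{m_X}{m_Y} \le 1 .
\]

The quantity is dimensionless, which is why the tendencies in these hunks carry canonical units of s-1.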
@@ -27207,21 +30182,21 @@ final_air_pressure_of_lifted_parcel should be specified to indicate the specific s-1 - 'Mass fraction' is used in the construction mass_fraction_of_X_in_Y, where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical or biological species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. In an atmosphere model, stratiform cloud is that produced by large-scale convergence (not the convectionschemes). The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. The Bergeron-Findeisen process is the conversion of cloud liquid water to cloud ice arising from the fact that water vapor has a lower equilibrium vapor pressure with respect to ice than it has with respect to liquid water at the same subfreezing temperature. "tendency_of_X" means derivative of X with respect to time. + The phrase "tendency_of_X" means derivative of X with respect to time. "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". In an atmosphere model, stratiform cloud is that produced by large-scale convergence (not the convection schemes). The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. The Bergeron-Findeisen process is the conversion of cloud liquid water to cloud ice arising from the fact that water vapor has a lower equilibrium vapor pressure with respect to ice than it has with respect to liquid water at the same subfreezing temperature. s-1 - The phrase "tendency_of_X" means derivative of X with respect to time. Mass fraction is used in the construction mass_fraction_of_X_in_Y, where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". In an atmosphere model, stratiform cloud is that produced by large-scale convergence (not the convection schemes). The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Boundary layer mixing" means turbulent motions that transport heat, water, momentum and chemical constituents within the atmospheric boundary layer and affect exchanges between the surface and the atmosphere. The atmospheric boundary layer is typically characterised by a well-mixed sub-cloud layer of order 500 metres, and by a more extended conditionally unstable layer with boundary-layer clouds up to 2 km. (Reference: IPCC Third Assessment Report, Working Group 1: The Scientific Basis, 7.2.2.3, https://www.ipcc.ch/ipccreports/tar/wg1/273.htm). + The phrase "tendency_of_X" means derivative of X with respect to time. Mass fraction is used in the construction mass_fraction_of_X_in_Y, where X is a material constituent of Y. 
It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". In an atmosphere model, stratiform cloud is that produced by large-scale convergence (not the convection schemes). The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Boundary layer mixing" means turbulent motions that transport heat, water, momentum and chemical constituents within the atmospheric boundary layer and affect exchanges between the surface and the atmosphere. The atmospheric boundary layer is typically characterised by a well-mixed sub-cloud layer of order 500 metres, and by a more extended conditionally unstable layer with boundary-layer clouds up to 2 km. (Reference: IPCC Third Assessment Report, Working Group 1: The Scientific Basis, 7.2.2.3, https://archive.ipcc.ch/ipccreports/tar/wg1/273.htm). s-1 - 'Mass fraction' is used in the construction mass_fraction_of_X_in_Y, where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical or biological species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. In an atmosphere model, stratiform cloud is that produced by large-scale convergence (not the convection schemes). The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. 'cloud_microphysics' is the sum of many cloud processes such as condensation, evaporation, homogeneous nucleation, heterogeneous nucleation, deposition, sublimation, the Bergeron-Findeisen process, riming, accretion, aggregationand icefall. The precise list of processes that are included in 'cloud_microphysics' canvary between models. Where possible, the data variable should be accompanied by a complete description of the processes included, for example, by using a comment attribute. Standard names also exist to describe the tendencies due to the separate processes. "tendency_of_X" means derivative of X with respect to time. + The phrase "tendency_of_X" means derivative of X with respect to time. Mass fraction is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". In an atmosphere model, stratiform cloud is that produced by large-scale convergence (not the convection schemes). The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Cloud microphysics" is the sum of many cloud processes such as condensation, evaporation, homogeneous nucleation, heterogeneous nucleation, deposition, sublimation, the Bergeron-Findeisen process, riming, accretion, aggregation and icefall. The precise list of processes that are included in "cloud microphysics" can vary between models. 
Where possible, the data variable should be accompanied by a complete description of the processes included, for example, by using a comment attribute. Standard names also exist to describe the tendencies due to the separate processes. @@ -27249,21 +30224,21 @@ final_air_pressure_of_lifted_parcel should be specified to indicate the specific s-1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, whereX is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. In an atmosphere model, stratiform cloud is that produced by large-scale convergence (not the convection schemes). The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "tendency_of_X" means derivative of X with respect to time. Heterogeneous nucleation occurs when a small particle of a substance other than water acts as a freezing or condensation nucleus. + The phrase "tendency_of_X" means derivative of X with respect to time. "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". In an atmosphere model, stratiform cloud is that produced by large-scale convergence (not the convection schemes). The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. Heterogeneous nucleation occurs when a small particle of a substance other than water acts as a freezing or condensation nucleus. "Cloud liquid water" refers to the liquid phase of cloud water. A diameter of 0.2 mm has been suggested as an upper limit to the size of drops that shall be regarded as cloud drops; larger drops fall rapidly enough so that only very strong updrafts can sustain them. Any such division is somewhat arbitrary, and active cumulus clouds sometimes contain cloud drops much larger than this. Reference: AMS Glossary http://glossary.ametsoc.org/wiki/Cloud_drop. s-1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, whereX is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. In an atmosphere model, stratiform cloud is that produced by large-scale convergence (not the convection schemes). The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "tendency_of_X" means derivative of X with respect to time. Heterogeneous nucleation occurs when a small particle of a substance other than water acts as a freezing or condensation nucleus. + The phrase "tendency_of_X" means derivative of X with respect to time. "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. 
It means the ratio of the mass of X to the mass of Y (including X). In an atmosphere model, stratiform cloud is that produced by large-scale convergence (not the convection schemes). The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. Heterogeneous nucleation occurs when a small particle of a substance other than water acts as a freezing or condensation nucleus. s-1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, whereX is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. In an atmosphere model, stratiform cloud is that produced by large-scale convergence (not the convection schemes). The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "tendency_of_X" means derivative of X with respect to time. Homogeneous nucleation occurs when a small number of water molecules combine to form a freezing or condensation nucleus. + The phrase "tendency_of_X" means derivative of X with respect to time. "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). In an atmosphere model, stratiform cloud is that produced by large-scale convergence (not the convection schemes). The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. Homogeneous nucleation occurs when a small number of water molecules combine to form a freezing or condensation nucleus. @@ -27277,7 +30252,7 @@ final_air_pressure_of_lifted_parcel should be specified to indicate the specific s-1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. In an atmosphere model, stratiform cloud is that produced by large-scale convergence (not the convection schemes). The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "tendency_of_X" means derivative of X with respect to time. + The phrase "tendency_of_X" means derivative of X with respect to time. "Mass fraction" is used in the construction mass_fraction_of_X_in_Y, where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". In an atmosphere model, stratiform cloud is that produced by large-scale convergence (not the convection schemes). The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. 
"Cloud liquid water" refers to the liquid phase of cloud water. A diameter of 0.2 mm has been suggested as an upper limit to the size of drops that shall be regarded as cloud drops; larger drops fall rapidly enough so that only very strong updrafts can sustain them. Any such division is somewhat arbitrary, and active cumulus clouds sometimes contain cloud drops much larger than this. Reference: AMS Glossary http://glossary.ametsoc.org/wiki/Cloud_drop. @@ -27291,7 +30266,7 @@ final_air_pressure_of_lifted_parcel should be specified to indicate the specific s-1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. Riming is the rapid freezing of supercooled water onto a surface. In an atmosphere model, stratiform cloud is that produced by large-scale convergence (not the convection schemes). The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "tendency_of_X" means derivative of X with respect to time. + The phrase "tendency_of_X" means derivative of X with respect to time. "Mass fraction" is used in the construction mass_fraction_of_X_in_Y, where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". In an atmosphere model, stratiform cloud is that produced by large-scale convergence (not the convection schemes). The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. Riming is the rapid freezing of supercooled water onto a surface. "Cloud liquid water" refers to the liquid phase of cloud water. A diameter of 0.2 mm has been suggested as an upper limit to the size of drops that shall be regarded as cloud drops; larger drops fall rapidly enough so that only very strong updrafts can sustain them. Any such division is somewhat arbitrary, and active cumulus clouds sometimes contain cloud drops much larger than this. Reference: AMS Glossary http://glossary.ametsoc.org/wiki/Cloud_drop. @@ -27305,140 +30280,140 @@ final_air_pressure_of_lifted_parcel should be specified to indicate the specific s-1 - 'Mass fraction' is used in the construction mass_fraction_of_X_in_Y, where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical or biological species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. In an atmosphere model, stratiform cloud is that produced by large-scale convergence (not the convectionschemes). "tendency_of_X" means derivative of X with respect to time. + The phrase "tendency_of_X" means derivative of X with respect to time. "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). 
A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". In an atmosphere model, stratiform cloud is that produced by large-scale convergence (not the convection schemes). "Cloud liquid water" refers to the liquid phase of cloud water. A diameter of 0.2 mm has been suggested as an upper limit to the size of drops that shall be regarded as cloud drops; larger drops fall rapidly enough so that only very strong updrafts can sustain them. Any such division is somewhat arbitrary, and active cumulus clouds sometimes contain cloud drops much larger than this. Reference: AMS Glossary http://glossary.ametsoc.org/wiki/Cloud_drop. s-1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. Accretion is the growth of a hydrometeor by collision with cloud droplets or ice crystals. In an atmosphere model, stratiform cloud is that produced by large-scale convergence (not the convection schemes). The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "tendency_of_X" means derivative of X with respect to time. + The phrase "tendency_of_X" means derivative of X with respect to time. "Mass fraction" is used in the construction mass_fraction_of_X_in_Y, where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". In an atmosphere model, stratiform cloud is that produced by large-scale convergence (not the convection schemes). "Cloud liquid water" refers to the liquid phase of cloud water. A diameter of 0.2 mm has been suggested as an upper limit to the size of drops that shall be regarded as cloud drops; larger drops fall rapidly enough so that only very strong updrafts can sustain them. Any such division is somewhat arbitrary, and active cumulus clouds sometimes contain cloud drops much larger than this. Reference: AMS Glossary http://glossary.ametsoc.org/wiki/Cloud_drop. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. Accretion is the growth of a hydrometeor by collision with cloud droplets or ice crystals. "Rain" means drops of water falling through the atmosphere that have a diameter greater than 0.5 mm. s-1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. Accretion is the growth of a hydrometeor by collision with cloud droplets or ice crystals. In an atmosphere model, stratiform cloud is that produced by large-scale convergence (not the convection schemes). 
The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "tendency_of_X" means derivative of X with respect to time. + The phrase "tendency_of_X" means derivative of X with respect to time. "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y" where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". In an atmosphere model, stratiform cloud is that produced by large-scale convergence (not the convection schemes). "Cloud liquid water" refers to the liquid phase of cloud water. A diameter of 0.2 mm has been suggested as an upper limit to the size of drops that shall be regarded as cloud drops; larger drops fall rapidly enough so that only very strong updrafts can sustain them. Any such division is somewhat arbitrary, and active cumulus clouds sometimes contain cloud drops much larger than this. Reference: AMS Glossary http://glossary.ametsoc.org/wiki/Cloud_drop. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. Accretion is the growth of a hydrometeor by collision with cloud droplets or ice crystals. s-1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. In an atmosphere model, stratiform cloud is that produced by large-scale convergence (not the convection schemes). The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "tendency_of_X" means derivative of X with respect to time. + The phrase "tendency_of_X" means derivative of X with respect to time. "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". In an atmosphere model, stratiform cloud is that produced by large-scale convergence (not the convection schemes). "Cloud liquid water" refers to the liquid phase of cloud water. A diameter of 0.2 mm has been suggested as an upper limit to the size of drops that shall be regarded as cloud drops; larger drops fall rapidly enough so that only very strong updrafts can sustain them. Any such division is somewhat arbitrary, and active cumulus clouds sometimes contain cloud drops much larger than this. Reference: AMS Glossary http://glossary.ametsoc.org/wiki/Cloud_drop. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. s-1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, where X is a material constituent of Y. 
s-1
- Mass fraction is used in the construction mass_fraction_of_X_in_Y, where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. Autoconversion is the process of collision and coalescence which results in the formation of precipitation particles from cloud water droplets or ice crystals. In an atmosphere model, stratiform cloud is that produced by large-scale convergence (not the convection schemes). The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "tendency_of_X" means derivative of X with respect to time.
+ The phrase "tendency_of_X" means derivative of X with respect to time. "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". In an atmosphere model, stratiform cloud is that produced by large-scale convergence (not the convection schemes). "Cloud liquid water" refers to the liquid phase of cloud water. A diameter of 0.2 mm has been suggested as an upper limit to the size of drops that shall be regarded as cloud drops; larger drops fall rapidly enough so that only very strong updrafts can sustain them. Any such division is somewhat arbitrary, and active cumulus clouds sometimes contain cloud drops much larger than this. Reference: AMS Glossary http://glossary.ametsoc.org/wiki/Cloud_drop. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. Autoconversion is the process of collision and coalescence which results in the formation of precipitation particles from cloud water droplets or ice crystals.
s-1
- 'Mass fraction' is used in the construction mass_fraction_of_X_in_Y, where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical or biological species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. In an atmosphere model, stratiform cloud is that produced by large-scale convergence (not the convection schemes). The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. The Bergeron-Findeisen process is the conversion of cloud liquid water to cloud ice arising from the fact that water vapor has a lower equilibrium vapor pressure with respect to ice than it has with respect to liquid water at the same subfreezing temperature. "tendency_of_X" means derivative of X with respect to time.
+ The phrase "tendency_of_X" means derivative of X with respect to time. "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". In an atmosphere model, stratiform cloud is that produced by large-scale convergence (not the convection schemes). "Cloud liquid water" refers to the liquid phase of cloud water. A diameter of 0.2 mm has been suggested as an upper limit to the size of drops that shall be regarded as cloud drops; larger drops fall rapidly enough so that only very strong updrafts can sustain them. Any such division is somewhat arbitrary, and active cumulus clouds sometimes contain cloud drops much larger than this. Reference: AMS Glossary http://glossary.ametsoc.org/wiki/Cloud_drop. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. The Bergeron-Findeisen process is the conversion of cloud liquid water to cloud ice arising from the fact that water vapor has a lower equilibrium vapor pressure with respect to ice than it has with respect to liquid water at the same subfreezing temperature.
s-1
- The phrase "tendency_of_X" means derivative of X with respect to time. Mass fraction is used in the construction mass_fraction_of_X_in_Y, where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". In an atmosphere model, stratiform cloud is that produced by large-scale convergence (not the convection schemes). The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Boundary layer mixing" means turbulent motions that transport heat, water, momentum and chemical constituents within the atmospheric boundary layer and affect exchanges between the surface and the atmosphere. The atmospheric boundary layer is typically characterised by a well-mixed sub-cloud layer of order 500 metres, and by a more extended conditionally unstable layer with boundary-layer clouds up to 2 km. (Reference: IPCC Third Assessment Report, Working Group 1: The Scientific Basis, 7.2.2.3, https://www.ipcc.ch/ipccreports/tar/wg1/273.htm).
+ The phrase "tendency_of_X" means derivative of X with respect to time. "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". In an atmosphere model, stratiform cloud is that produced by large-scale convergence (not the convection schemes). "Cloud liquid water" refers to the liquid phase of cloud water. A diameter of 0.2 mm has been suggested as an upper limit to the size of drops that shall be regarded as cloud drops; larger drops fall rapidly enough so that only very strong updrafts can sustain them. Any such division is somewhat arbitrary, and active cumulus clouds sometimes contain cloud drops much larger than this. Reference: AMS Glossary http://glossary.ametsoc.org/wiki/Cloud_drop. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Boundary layer mixing" means turbulent motions that transport heat, water, momentum and chemical constituents within the atmospheric boundary layer and affect exchanges between the surface and the atmosphere. The atmospheric boundary layer is typically characterised by a well-mixed sub-cloud layer of order 500 metres, and by a more extended conditionally unstable layer with boundary-layer clouds up to 2 km. (Reference: IPCC Third Assessment Report, Working Group 1: The Scientific Basis, 7.2.2.3, https://archive.ipcc.ch/ipccreports/tar/wg1/273.htm).
s-1
- 'Mass fraction' is used in the construction mass_fraction_of_X_in_Y, where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical or biological species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. In an atmosphere model, stratiform cloud is that produced by large-scale convergence (not the convectionschemes). The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. 'cloud_microphysics' is the sum of many cloud processes such as condensation, evaporation, homogeneous nucleation, heterogeneous nucleation, deposition, sublimation, the Bergeron-Findeisen process, riming, accretion, aggregationand icefall. The precise list of processes that are included in 'cloud_microphysics' canvary between models. Where possible, the data variable should be accompanied by a complete description of the processes included, for example, by using a comment attribute. Standard names also exist to describe the tendencies due to the separate processes. "tendency_of_X" means derivative of X with respect to time.
+ The phrase "tendency_of_X" means derivative of X with respect to time. "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". In an atmosphere model, stratiform cloud is that produced by large-scale convergence (not the convection schemes). "Cloud liquid water" refers to the liquid phase of cloud water. A diameter of 0.2 mm has been suggested as an upper limit to the size of drops that shall be regarded as cloud drops; larger drops fall rapidly enough so that only very strong updrafts can sustain them. Any such division is somewhat arbitrary, and active cumulus clouds sometimes contain cloud drops much larger than this. Reference: AMS Glossary http://glossary.ametsoc.org/wiki/Cloud_drop. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Cloud microphysics" is the sum of many cloud processes such as condensation, evaporation, homogeneous nucleation, heterogeneous nucleation, deposition, sublimation, the Bergeron-Findeisen process, riming, accretion, aggregation and icefall. The precise list of processes that are included in "cloud microphysics" can vary between models. Where possible, the data variable should be accompanied by a complete description of the processes included, for example, by using a comment attribute. Standard names also exist to describe the tendencies due to the separate processes.
s-1
- Mass fraction is used in the construction mass_fraction_of_X_in_Y, where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. Evaporation is the conversion of liquid or solid into vapor. Condensation is the conversion of vapor into liquid. In an atmosphere model, stratiform cloud is that produced by large-scale convergence (not the convection schemes). The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "tendency_of_X" means derivative of X with respect to time.
+ The phrase "tendency_of_X" means derivative of X with respect to time. "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". In an atmosphere model, stratiform cloud is that produced by large-scale convergence (not the convection schemes). "Cloud liquid water" refers to the liquid phase of cloud water. A diameter of 0.2 mm has been suggested as an upper limit to the size of drops that shall be regarded as cloud drops; larger drops fall rapidly enough so that only very strong updrafts can sustain them. Any such division is somewhat arbitrary, and active cumulus clouds sometimes contain cloud drops much larger than this. Reference: AMS Glossary http://glossary.ametsoc.org/wiki/Cloud_drop. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. Condensation is the conversion of vapor into liquid. Evaporation is the conversion of liquid or solid into vapor.
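The "due_to_" convention these descriptions keep restating has a simple arithmetic reading: each per-process quantity is one term, and omitting the "due_to_..." phrase names the sum of all the terms. A minimal sketch with invented numbers (the process keys below are illustrative, not a complete list from the table):

    # Hypothetical per-process tendencies (s-1) for a single grid box.
    tendency_due_to = {
        "autoconversion": -2.0e-9,
        "accretion": -1.5e-9,
        "condensation": +4.0e-9,
    }

    # The general quantity, named by omitting "due_to_...", is the sum of terms.
    total_tendency = sum(tendency_due_to.values())
    print(total_tendency)  # 5e-10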
+ The phrase "tendency_of_X" means derivative of X with respect to time. "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". In an atmosphere model, stratiform cloud is that produced by large-scale convergence (not the convection schemes). "Cloud liquid water" refers to the liquid phase of cloud water. A diameter of 0.2 mm has been suggested as an upper limit to the size of drops that shall be regarded as cloud drops; larger drops fall rapidly enough so that only very strong updrafts can sustain them. Any such division is somewhat arbitrary, and active cumulus clouds sometimes contain cloud drops much larger than this. Reference: AMS Glossary http://glossary.ametsoc.org/wiki/Cloud_drop. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. Condensation is the conversion of vapor into liquid. Evaporation is the conversion of liquid or solid into vapor. "Boundary layer mixing" means turbulent motions that transport heat, water, momentum and chemical constituents within the atmospheric boundary layer and affect exchanges between the surface and the atmosphere. The atmospheric boundary layer is typically characterised by a well-mixed sub-cloud layer of order 500 metres, and by a more extended conditionally unstable layer with boundary-layer clouds up to 2 km. (Reference: IPCC Third Assessment Report, Working Group 1: The Scientific Basis, 7.2.2.3, https://archive.ipcc.ch/ipccreports/tar/wg1/273.htm). s-1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. Evaporation is the conversion of liquid or solid into vapor. Condensation is the conversion of vapor into liquid. In an atmosphere model, stratiform cloud is that produced by large-scale convergence (not the convection schemes). The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "tendency_of_X" means derivative of X with respect to time. + The phrase "tendency_of_X" means derivative of X with respect to time. "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". In an atmosphere model, stratiform cloud is that produced by large-scale convergence (not the convection schemes). "Cloud liquid water" refers to the liquid phase of cloud water. A diameter of 0.2 mm has been suggested as an upper limit to the size of drops that shall be regarded as cloud drops; larger drops fall rapidly enough so that only very strong updrafts can sustain them. Any such division is somewhat arbitrary, and active cumulus clouds sometimes contain cloud drops much larger than this. 
Reference: AMS Glossary http://glossary.ametsoc.org/wiki/Cloud_drop. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. Condensation is the conversion of vapor into liquid. Evaporation is the conversion of liquid or solid into vapor. s-1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. Evaporation is the conversion of liquid or solid into vapor. Condensation is the conversion of vapor into liquid. In an atmosphere model, stratiform cloud is that produced by large-scale convergence (not the convection schemes). The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "tendency_of_X" means derivative of X with respect to time. "longwave" means longwave radiation. + The phrase "tendency_of_X" means derivative of X with respect to time. "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". In an atmosphere model, stratiform cloud is that produced by large-scale convergence (not the convection schemes). "Cloud liquid water" refers to the liquid phase of cloud water. A diameter of 0.2 mm has been suggested as an upper limit to the size of drops that shall be regarded as cloud drops; larger drops fall rapidly enough so that only very strong updrafts can sustain them. Any such division is somewhat arbitrary, and active cumulus clouds sometimes contain cloud drops much larger than this. Reference: AMS Glossary http://glossary.ametsoc.org/wiki/Cloud_drop. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. Condensation is the conversion of vapor into liquid. Evaporation is the conversion of liquid or solid into vapor. The term "longwave" means longwave radiation. s-1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. Evaporation is the conversion of liquid or solid into vapor. Condensation is the conversion of vapor into liquid. In an atmosphere model, stratiform cloud is that produced by large-scale convergence (not the convection schemes). The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "tendency_of_X" means derivative of X with respect to time. + The phrase "tendency_of_X" means derivative of X with respect to time. 
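The "(including X)" caveat in the mass-fraction definition is easy to get wrong, so it is worth making concrete: the denominator is the mass of Y with X counted in, not the mass of the remainder. A hedged sketch; the function is hypothetical, written only to pin down the ratio:

    def mass_fraction_of_x_in_y(mass_x, mass_y_excluding_x):
        """Ratio of the mass of X to the mass of Y *including* X (kg/kg)."""
        return mass_x / (mass_x + mass_y_excluding_x)

    # e.g. 0.1 kg of a constituent mixed into 99.9 kg of everything else:
    print(mass_fraction_of_x_in_y(0.1, 99.9))  # 0.001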
"Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". In an atmosphere model, stratiform cloud is that produced by large-scale convergence (not the convection schemes). "Cloud liquid water" refers to the liquid phase of cloud water. A diameter of 0.2 mm has been suggested as an upper limit to the size of drops that shall be regarded as cloud drops; larger drops fall rapidly enough so that only very strong updrafts can sustain them. Any such division is somewhat arbitrary, and active cumulus clouds sometimes contain cloud drops much larger than this. Reference: AMS Glossary http://glossary.ametsoc.org/wiki/Cloud_drop. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. Condensation is the conversion of vapor into liquid. Evaporation is the conversion of liquid or solid into vapor. s-1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. Evaporation is the conversion of liquid or solid into vapor. In an atmosphere model, stratiform cloud is that produced by large-scale convergence (not the convection schemes). The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "tendency_of_X" means derivative of X with respect to time. "shortwave" means shortwave radiation. + The phrase "tendency_of_X" means derivative of X with respect to time. "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". In an atmosphere model, stratiform cloud is that produced by large-scale convergence (not the convection schemes). "Cloud liquid water" refers to the liquid phase of cloud water. A diameter of 0.2 mm has been suggested as an upper limit to the size of drops that shall be regarded as cloud drops; larger drops fall rapidly enough so that only very strong updrafts can sustain them. Any such division is somewhat arbitrary, and active cumulus clouds sometimes contain cloud drops much larger than this. Reference: AMS Glossary http://glossary.ametsoc.org/wiki/Cloud_drop. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. Condensation is the conversion of vapor into liquid. Evaporation is the conversion of liquid or solid into vapor. The term "shortwave" means shortwave radiation. s-1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). 
A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. Evaporation is the conversion of liquid or solid into vapor. Condensation is the conversion of vapor into liquid. In an atmosphere model, stratiform cloud is that produced by large-scale convergence (not the convection schemes). The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "tendency_of_X" means derivative of X with respect to time. + The phrase "tendency_of_X" means derivative of X with respect to time. "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". In an atmosphere model, stratiform cloud is that produced by large-scale convergence (not the convection schemes). "Cloud liquid water" refers to the liquid phase of cloud water. A diameter of 0.2 mm has been suggested as an upper limit to the size of drops that shall be regarded as cloud drops; larger drops fall rapidly enough so that only very strong updrafts can sustain them. Any such division is somewhat arbitrary, and active cumulus clouds sometimes contain cloud drops much larger than this. Reference: AMS Glossary http://glossary.ametsoc.org/wiki/Cloud_drop. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. Condensation is the conversion of vapor into liquid. Evaporation is the conversion of liquid or solid into vapor. s-1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. In an atmosphere model, stratiform cloud is that produced by large-scale convergence (not the convection schemes). The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "tendency_of_X" means derivative of X with respect to time. + The phrase "tendency_of_X" means derivative of X with respect to time. "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". In an atmosphere model, stratiform cloud is that produced by large-scale convergence (not the convection schemes). "Cloud liquid water" refers to the liquid phase of cloud water. A diameter of 0.2 mm has been suggested as an upper limit to the size of drops that shall be regarded as cloud drops; larger drops fall rapidly enough so that only very strong updrafts can sustain them. Any such division is somewhat arbitrary, and active cumulus clouds sometimes contain cloud drops much larger than this. 
Reference: AMS Glossary http://glossary.ametsoc.org/wiki/Cloud_drop. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. s-1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, whereX is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. In an atmosphere model, stratiform cloud is that produced by large-scale convergence (not the convection schemes). The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "tendency_of_X" means derivative of X with respect to time. Heterogeneous nucleation occurs when a small particle of a substance other than water acts asa freezing or condensation nucleus. + The phrase "tendency_of_X" means derivative of X with respect to time. "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". In an atmosphere model, stratiform cloud is that produced by large-scale convergence (not the convection schemes). "Cloud liquid water" refers to the liquid phase of cloud water. A diameter of 0.2 mm has been suggested as an upper limit to the size of drops that shall be regarded as cloud drops; larger drops fall rapidly enough so that only very strong updrafts can sustain them. Any such division is somewhat arbitrary, and active cumulus clouds sometimes contain cloud drops much larger than this. Reference: AMS Glossary http://glossary.ametsoc.org/wiki/Cloud_drop. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. Heterogeneous nucleation occurs when a small particle of a substance other than water acts as a freezing or condensation nucleus. s-1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, whereX is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. In an atmosphere model, stratiform cloud is that produced by large-scale convergence (not the convection schemes). The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "tendency_of_X" means derivative of X with respect to time. Homogeneous nucleation occurs when a small number of water molecules combine to form a freezing or condensation nucleus. + The phrase "tendency_of_X" means derivative of X with respect to time. "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). 
A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". In an atmosphere model, stratiform cloud is that produced by large-scale convergence (not the convection schemes). "Cloud liquid water" refers to the liquid phase of cloud water. A diameter of 0.2 mm has been suggested as an upper limit to the size of drops that shall be regarded as cloud drops; larger drops fall rapidly enough so that only very strong updrafts can sustain them. Any such division is somewhat arbitrary, and active cumulus clouds sometimes contain cloud drops much larger than this. Reference: AMS Glossary http://glossary.ametsoc.org/wiki/Cloud_drop. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. Homogeneous nucleation occurs when a small number of water molecules combine to form a freezing or condensation nucleus. s-1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. In an atmosphere model, stratiform cloud is that produced by large-scale convergence (not the convection schemes). The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "tendency_of_X" means derivative of X with respect to time. + The phrase "tendency_of_X" means derivative of X with respect to time. "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". In an atmosphere model, stratiform cloud is that produced by large-scale convergence (not the convection schemes). "Cloud liquid water" refers to the liquid phase of cloud water. A diameter of 0.2 mm has been suggested as an upper limit to the size of drops that shall be regarded as cloud drops; larger drops fall rapidly enough so that only very strong updrafts can sustain them. Any such division is somewhat arbitrary, and active cumulus clouds sometimes contain cloud drops much larger than this. Reference: AMS Glossary http://glossary.ametsoc.org/wiki/Cloud_drop. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. s-1 - Mass fraction is used in the construction mass_fraction_of_X_in_Y, where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. Riming is the rapid freezing of supercooled water onto a surface. In an atmosphere model, stratiform cloud is that produced by large-scale convergence (not the convection schemes). 
The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "tendency_of_X" means derivative of X with respect to time. + The phrase "tendency_of_X" means derivative of X with respect to time. "Mass fraction" is used in the construction "mass_fraction_of_X_in_Y", where X is a material constituent of Y. It means the ratio of the mass of X to the mass of Y (including X). A chemical species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". In an atmosphere model, stratiform cloud is that produced by large-scale convergence (not the convection schemes). "Cloud liquid water" refers to the liquid phase of cloud water. A diameter of 0.2 mm has been suggested as an upper limit to the size of drops that shall be regarded as cloud drops; larger drops fall rapidly enough so that only very strong updrafts can sustain them. Any such division is somewhat arbitrary, and active cumulus clouds sometimes contain cloud drops much larger than this. Reference: AMS Glossary http://glossary.ametsoc.org/wiki/Cloud_drop. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. Riming is the rapid freezing of supercooled water onto a surface. @@ -27452,7 +30427,7 @@ final_air_pressure_of_lifted_parcel should be specified to indicate the specific mol s-1 - "tendency_of_X" means derivative of X with respect to time. "middle_atmosphere_moles_of_X" means the total number of moles of X contained in the troposphere and stratosphere, i.e, summed over that part of the atmospheric column and over the entire globe. The chemical formula of HCC140a is CH3CCl3. The IUPAC name for HCC 140a is 1,1,1-trichloroethane. + The phrase "tendency_of_X" means derivative of X with respect to time. The construction "middle_atmosphere_moles_of_X" means the total number of moles of X contained in the troposphere and stratosphere, i.e, summed over that part of the atmospheric column and over the entire globe. The chemical formula of HCC140a, also called methyl chloroform, is CH3CCl3. The IUPAC name for HCC140a is 1,1,1-trichloroethane. @@ -27487,91 +30462,91 @@ final_air_pressure_of_lifted_parcel should be specified to indicate the specific mol m-3 s-1 - 'Mole concentration' means number of moles per unit volume, also called"molarity", and is used in the construction mole_concentration_of_X_in_Y, whereX is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The phrase 'expressed_as' is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "tendency_of_X" means derivative of X with respect to time. Aragonite is a mineral that is a polymorph of calcium carbonate. The chemical formula of aragonite is CaCO3. 
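The canonical units that accompany these entries ("mol s-1" above, "mol m-3 s-1" for the mole-concentration tendencies that follow) can be checked programmatically. A sketch using the cf-units package that Iris builds on; the particular comparisons and values are illustrative only:

    from cf_units import Unit

    # A mole-concentration ("molarity") tendency is molarity per unit time.
    unit = Unit("mol m-3 s-1")
    assert unit == Unit("mol m-3") / Unit("s")

    # Convert a value into an equivalent unit, e.g. mmol m-3 s-1.
    print(unit.convert(1.0, Unit("mmol m-3 s-1")))  # 1000.0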
@@ -27487,91 +30462,91 @@ final_air_pressure_of_lifted_parcel
should be specified to indicate the specific
mol m-3 s-1
- 'Mole concentration' means number of moles per unit volume, also called"molarity", and is used in the construction mole_concentration_of_X_in_Y, whereX is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The phrase 'expressed_as' is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "tendency_of_X" means derivative of X with respect to time. Aragonite is a mineral that is a polymorph of calcium carbonate. The chemical formula of aragonite is CaCO3. Standard names also exist for calcite, another polymorph of calcium carbonate.
+ The phrase "tendency_of_X" means derivative of X with respect to time. Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction mole_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. Aragonite is a mineral that is a polymorph of calcium carbonate. The chemical formula of aragonite is CaCO3. Standard names also exist for calcite, another polymorph of calcium carbonate.
mol m-3 s-1
- 'Mole concentration' means number of moles per unit volume, also called"molarity", and is used in the construction mole_concentration_of_X_in_Y, whereX is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The phrase 'expressed_as' is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "tendency_of_X" means derivative of X with respect to time. Aragonite is a mineral that is a polymorph of calcium carbonate. The chemical formula of aragonite is CaCO3. Standard names also exist for calcite, another polymorph of calcium carbonate.
+ The phrase "tendency_of_X" means derivative of X with respect to time. Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction mole_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. Aragonite is a mineral that is a polymorph of calcium carbonate. The chemical formula of aragonite is CaCO3. Standard names also exist for calcite, another polymorph of calcium carbonate.
mol m-3 s-1
- 'Mole concentration' means number of moles per unit volume, also called"molarity", and is used in the construction mole_concentration_of_X_in_Y, whereX is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The phrase 'expressed_as' is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "tendency_of_X" means derivative of X with respect to time. Calcite is a mineral that is a polymorph of calcium carbonate. Thechemical formula of calcite is CaCO3. Standard names also exist for aragonite, another polymorph of calcium carbonate.
+ The phrase "tendency_of_X" means derivative of X with respect to time. Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction mole_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. Calcite is a mineral that is a polymorph of calcium carbonate. The chemical formula of calcite is CaCO3. Standard names also exist for aragonite, another polymorph of calcium carbonate.
mol m-3 s-1
- 'Mole concentration' means number of moles per unit volume, also called"molarity", and is used in the construction mole_concentration_of_X_in_Y, whereX is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The phrase 'expressed_as' is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "tendency_of_X" means derivative of X with respect to time. Calcite is a mineral that is a polymorph of calcium carbonate. Thechemical formula of calcite is CaCO3. Standard names also exist for aragonite, another polymorph of calcium carbonate.
+ The phrase "tendency_of_X" means derivative of X with respect to time. Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction mole_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. Calcite is a mineral that is a polymorph of calcium carbonate. The chemical formula of calcite is CaCO3. Standard names also exist for aragonite, another polymorph of calcium carbonate.
mol m-3 s-1
- 'Mole concentration' means number of moles per unit volume, also called"molarity", and is used in the construction mole_concentration_of_X_in_Y, whereX is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "tendency_of_X" means derivative of X with respect to time. "Dissolved inorganic carbon" describes a family of chemical species in solution, including carbon dioxide, carbonic acid and the carbonate and bicarbonate anions. "Dissolved inorganic carbon" is the term used in standard names for all species belonging to the family that are represented within a given model. The list of individual species that are included in a quantity having a group chemical standard name can vary between models. Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute.
+ The phrase "tendency_of_X" means derivative of X with respect to time. Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction mole_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Dissolved inorganic carbon" describes a family of chemical species in solution, including carbon dioxide, carbonic acid and the carbonate and bicarbonate anions. "Dissolved inorganic carbon" is the term used in standard names for all species belonging to the family that are represented within a given model. The list of individual species that are included in a quantity having a group chemical standard name can vary between models. Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute.
mol m-3 s-1
- 'Mole concentration' means number of moles per unit volume, also called"molarity", and is used in the construction mole_concentration_of_X_in_Y, whereX is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "tendency_of_X" means derivative of X with respect to time. "Dissolved inorganic iron" means iron ions, in oxidation states of both Fe2+ and Fe3+, in solution.
+ The phrase "tendency_of_X" means derivative of X with respect to time. Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction mole_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Dissolved inorganic iron" means iron ions, in oxidation states of both Fe2+ and Fe3+, in solution.
mol m-3 s-1
- 'Mole concentration' means number of moles per unit volume, also called"molarity", and is used in the construction mole_concentration_of_X_in_Y, whereX is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "tendency_of_X" means derivative of X with respect to time. "Inorganic nitrogen" describes a family of chemical species which, in an ocean model, usually includes nitrite, nitrate and ammonium which act as nitrogennutrients. "Inorganic nitrogen" is the term used in standard names for all species belonging to the family that are represented within a given model. The list of individual species that are included in a quantity having a group chemical standard name can vary between models. Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute.
+ The phrase "tendency_of_X" means derivative of X with respect to time. Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction mole_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Inorganic nitrogen" describes a family of chemical species which, in an ocean model, usually includes nitrite, nitrate and ammonium which act as nitrogen nutrients. "Inorganic nitrogen" is the term used in standard names for all species belonging to the family that are represented within a given model. The list of individual species that are included in a quantity having a group chemical standard name can vary between models. Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute.
mol m-3 s-1
- 'Mole concentration' means number of moles per unit volume, also called "molarity", and is used in the construction mole_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "tendency_of_X" means derivative of X with respect to time. "Dissolved inorganic phosphorus" means the sum of all inorganic phosphorus in solution (including phosphate, hydrogen phosphate, dihydrogen phosphate, and phosphoric acid).
+ The phrase "tendency_of_X" means derivative of X with respect to time. Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction mole_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Dissolved inorganic phosphorus" means the sum of all inorganic phosphorus in solution (including phosphate, hydrogen phosphate, dihydrogen phosphate, and phosphoric acid).
mol m-3 s-1
- 'Mole concentration' means number of moles per unit volume, also called "molarity", and is used in the construction mole_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "tendency_of_X" means derivative of X with respect to time. "Dissolved inorganic silicon" means the sum of all inorganic silicon in solution (including silicic acid and its first dissociated anion SiO(OH)3-).
+ The phrase "tendency_of_X" means derivative of X with respect to time. Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction mole_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Dissolved inorganic silicon" means the sum of all inorganic silicon in solution (including silicic acid and its first dissociated anion SiO(OH)3-).
mol m-3 s-1
- Dissolution, remineralization and desorption of iron back to the dissolved phase 'Mole concentration' means number of moles per unit volume, also called "molarity", and is used in the construction mole_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity namedby omitting the phrase. "tendency_of_X" means derivative of X with respect to time.
+ The quantity with standard name tendency_of_mole_concentration_of_dissolved_iron_in_sea_water_due_to_dissolution_from_inorganic_particles is the change in concentration caused by the processes of dissolution, remineralization and desorption of iron back to the dissolved phase. The phrase "tendency_of_X" means derivative of X with respect to time. "Mole concentration" means number of moles per unit volume, also called "molarity", and is used in the construction "mole_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase.
mol m-3 s-1
- 'Mole concentration' means number of moles per unit volume, also called "molarity", and is used in the construction mole_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "tendency_of_X" means derivative of X with respect to time. Phytoplankton are autotrophic prokaryotic or eukaryotic algae that live near the water surface where there is sufficient light to support photosynthesis. 'Grazing of phytoplankton' means the grazing of phytoplankton by zooplankton.
+ The phrase "tendency_of_X" means derivative of X with respect to time. Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction mole_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. Phytoplankton are algae that grow where there is sufficient light to support photosynthesis. "Grazing of phytoplankton" means the grazing of phytoplankton by zooplankton.
A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. mol m-3 s-1 - 'Mole concentration' means number of moles per unit volume, also called"molarity", and is used in the construction mole_concentration_of_X_in_Y, whereX is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "tendency_of_X" means derivative of X with respect to time. + The phrase "tendency_of_X" means derivative of X with respect to time. Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction mole_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. @@ -27592,49 +30567,49 @@ final_air_pressure_of_lifted_parcel should be specified to indicate the specific mol m-3 s-1 - "tendency_of_X" means derivative of X with respect to time. Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction "mole_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. Phytoplankton are autotrophic prokaryotic or eukaryotic algae that live near the water surface where there is sufficient light to support photosynthesis. "Grazing of phytoplankton" means the grazing of phytoplankton by zooplankton. + The phrase "tendency_of_X" means derivative of X with respect to time. Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction "mole_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. 
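As a minimal numerical sketch of the tendency construction (the values, time step, and variable names below are illustrative assumptions, not any library's API), a tendency_of_mole_concentration quantity can be approximated by differencing a mole concentration in time:

    # Sketch: a tendency is the time derivative of a quantity, approximated
    # here by a forward difference of an (assumed) mole concentration series.
    conc_t0 = 2.40e-6  # mol m-3 at time t0 (assumed value)
    conc_t1 = 2.46e-6  # mol m-3 at time t0 + dt (assumed value)
    dt = 3600.0        # time step in seconds (assumed)

    tendency = (conc_t1 - conc_t0) / dt  # mol m-3 s-1, the canonical units above
    print(f"tendency_of_mole_concentration: {tendency:.2e} mol m-3 s-1")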
The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. Phytoplankton are algae that grow where there is sufficient light to support photosynthesis. "Grazing of phytoplankton" means the grazing of phytoplankton by zooplankton. mol m-3 s-1 - 'Mole concentration' means number of moles per unit volume, also called "molarity", and is used in the construction mole_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. Net primary production is the excess of gross primary production (the rate of synthesis of biomass from inorganic precursors) by autotrophs ("producers"), for example, photosynthesis in plants or phytoplankton, over the rate at which the autotrophs themselves respire some of this biomass. In the oceans, carbon production per unit volume is often found at a number of depths at a given horizontal location. That quantity can then be integrated to calculate production per unit area at the location. Standard names for production per unit area use the term "productivity". "tendency_of_X" means derivative of X with respect to time. + The phrase "tendency_of_X" means derivative of X with respect to time. Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction mole_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. Net primary production is the excess of gross primary production (the rate of synthesis of biomass from inorganic precursors) by autotrophs ("producers"), for example, photosynthesis in plants or phytoplankton, over the rate at which the autotrophs themselves respire some of this biomass. In the oceans, carbon production per unit volume is often found at a number of depths at a given horizontal location. That quantity can then be integrated to calculate production per unit area at the location. Standard names for production per unit area use the term "productivity". mol m-3 s-1 - Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction "mole_concentration_of_X_in_Y", where X is a material constituent of Y. 
A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Production of carbon" means the production of biomass expressed as the mass of carbon which it contains. Net primary production is the excess of gross primary production (the rate of synthesis of biomass from inorganic precursors) by autotrophs ("producers"), for example, photosynthesis in plants or phytoplankton, over the rate at which the autotrophs themselves respire some of this biomass. In the oceans, carbon production per unit volume is often found at a number of depths at a given horizontal location. That quantity can then be integrated to calculate production per unit area at the location. Standard names for production per unit area use the term "productivity". "Calcareous phytoplankton" are phytoplankton that produce calcite. Phytoplankton are autotrophic prokaryotic or eukaryotic algae that live near the water surface where there is sufficient light to support photosynthesis. Calcite is a mineral that is a polymorph of calcium carbonate. The chemical formula of calcite is CaCO3. Standard names also exist for aragonite, another polymorph of calcium carbonate. "tendency_of_X" means derivative of X with respect to time. + The phrase "tendency_of_X" means derivative of X with respect to time. Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction "mole_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Production of carbon" means the production of biomass expressed as the mass of carbon which it contains. Net primary production is the excess of gross primary production (the rate of synthesis of biomass from inorganic precursors) by autotrophs ("producers"), for example, photosynthesis in plants or phytoplankton, over the rate at which the autotrophs themselves respire some of this biomass. In the oceans, carbon production per unit volume is often found at a number of depths at a given horizontal location. That quantity can then be integrated to calculate production per unit area at the location. Standard names for production per unit area use the term "productivity". "Calcareous phytoplankton" are phytoplankton that produce calcite. Phytoplankton are algae that grow where there is sufficient light to support photosynthesis. 
Calcite is a mineral that is a polymorph of calcium carbonate. The chemical formula of calcite is CaCO3. Standard names also exist for aragonite, another polymorph of calcium carbonate. mol m-3 s-1 - Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction "mole_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Production of carbon" means the production of biomass expressed as the mass of carbon which it contains. Net primary production is the excess of gross primary production (the rate of synthesis of biomass from inorganic precursors) by autotrophs ("producers"), for example, photosynthesis in plants or phytoplankton, over the rate at which the autotrophs themselves respire some of this biomass. In the oceans, carbon production per unit volume is often found at a number of depths at a given horizontal location. That quantity can then be integrated to calculate production per unit area at the location. Standard names for production per unit area use the term "productivity". Diatoms are single-celled phytoplankton with an external skeleton made of silica. Phytoplankton are autotrophic prokaryotic or eukaryotic algae that live near the water surface where there is sufficient light to support photosynthesis. "tendency_of_X" means derivative of X with respect to time. + The phrase "tendency_of_X" means derivative of X with respect to time. Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction "mole_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Production of carbon" means the production of biomass expressed as the mass of carbon which it contains. Net primary production is the excess of gross primary production (the rate of synthesis of biomass from inorganic precursors) by autotrophs ("producers"), for example, photosynthesis in plants or phytoplankton, over the rate at which the autotrophs themselves respire some of this biomass. In the oceans, carbon production per unit volume is often found at a number of depths at a given horizontal location. That quantity can then be integrated to calculate production per unit area at the location. 
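The production/productivity distinction drawn here, per-unit-volume values at several depths integrated to a per-unit-area value, can be sketched numerically. The profile below is an assumed example, and the explicit trapezoidal rule is just one quadrature choice:

    import numpy as np

    # Assumed carbon production profile at one horizontal location.
    depth = np.array([5.0, 15.0, 30.0, 50.0, 75.0])                  # m below surface
    production = np.array([4.0e-8, 3.2e-8, 1.9e-8, 0.7e-8, 0.1e-8])  # mol m-3 s-1

    # Depth-integrate production (per unit volume, mol m-3 s-1) to obtain
    # productivity (per unit area, mol m-2 s-1) via the trapezoidal rule.
    productivity = np.sum(0.5 * (production[1:] + production[:-1]) * np.diff(depth))
    print(f"primary productivity: {productivity:.3e} mol m-2 s-1")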
Standard names for production per unit area use the term "productivity". Diatoms are single-celled phytoplankton with an external skeleton made of silica. Phytoplankton are algae that grow where there is sufficient light to support photosynthesis. - + mol m-3 s-1 - Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction mole_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Production of carbon" means the production of biomass expressed as the mass of carbon which it contains. Net primary production is the excess of gross primary production (the rate of synthesis of biomass from inorganic precursors) by autotrophs ("producers"), for example, photosynthesis in plants or phytoplankton, over the rate at which the autotrophs themselves respire some of this biomass. In the oceans, carbon production per unit volume is often found at a number of depths at a given horizontal location. That quantity can then be integrated to calculate production per unit area at the location. Standard names for production per unit area use the term "productivity". In ocean modelling, diazotrophs are phytoplankton of the phylum cyanobacteria distinct from other phytoplankton groups in their ability to fix nitrogen gas in addition to nitrate and ammonium. Phytoplankton are autotrophic prokaryotic or eukaryotic algae that live near the water surface where there is sufficient light to support photosynthesis. "tendency_of_X" means derivative of X with respect to time. + The phrase "tendency_of_X" means derivative of X with respect to time. "Mole concentration" means number of moles per unit volume, also called "molarity", and is used in the construction "mole_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical species or biological group denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The phrase "expressed_as" is used in the construction "A_expressed_as_B", where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Production of carbon" means the production of biomass expressed as the mass of carbon which it contains. Net primary production is the excess of gross primary production (the rate of synthesis of biomass from inorganic precursors) by autotrophs ("producers"), for example, photosynthesis in plants or phytoplankton, over the rate at which the autotrophs themselves respire some of this biomass. 
In the oceans, carbon production per unit volume is often found at a number of depths at a given horizontal location. That quantity can then be integrated to calculate production per unit area at the location. Standard names for production per unit area use the term "productivity". Phytoplankton are algae that grow where there is sufficient light to support photosynthesis. Diazotrophic phytoplankton are phytoplankton (predominantly from Phylum Cyanobacteria) that are able to fix molecular nitrogen (gas or solute) in addition to nitrate and ammonium. mol m-3 s-1 - Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction "mole_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Production of carbon" means the production of biomass expressed as the mass of carbon which it contains. Net primary production is the excess of gross primary production (the rate of synthesis of biomass from inorganic precursors) by autotrophs ("producers"), for example, photosynthesis in plants or phytoplankton, over the rate at which the autotrophs themselves respire some of this biomass. In the oceans, carbon production per unit volume is often found at a number of depths at a given horizontal location. That quantity can then be integrated to calculate production per unit area at the location. Standard names for production per unit area use the term "productivity". Phytoplankton are autotrophic prokaryotic or eukaryotic algae that live near the water surface where there is sufficient light to support photosynthesis. "Miscellaneous phytoplankton" are all those phytoplankton that are not diatoms, diazotrophs, calcareous phytoplankton, picophytoplankton or other seperately named components of the phytoplankton population. "tendency_of_X" means derivative of X with respect to time. + The phrase "tendency_of_X" means derivative of X with respect to time. Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction "mole_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Production of carbon" means the production of biomass expressed as the mass of carbon which it contains. 
Net primary production is the excess of gross primary production (the rate of synthesis of biomass from inorganic precursors) by autotrophs ("producers"), for example, photosynthesis in plants or phytoplankton, over the rate at which the autotrophs themselves respire some of this biomass. In the oceans, carbon production per unit volume is often found at a number of depths at a given horizontal location. That quantity can then be integrated to calculate production per unit area at the location. Standard names for production per unit area use the term "productivity". Phytoplankton are algae that grow where there is sufficient light to support photosynthesis. "Miscellaneous phytoplankton" are all those phytoplankton that are not diatoms, diazotrophs, calcareous phytoplankton, picophytoplankton or other separately named components of the phytoplankton population. mol m-3 s-1 - Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction "mole_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Production of carbon" means the production of biomass expressed as the mass of carbon which it contains. Net primary production is the excess of gross primary production (the rate of synthesis of biomass from inorganic precursors) by autotrophs ("producers"), for example, photosynthesis in plants or phytoplankton, over the rate at which the autotrophs themselves respire some of this biomass. In the oceans, carbon production per unit volume is often found at a number of depths at a given horizontal location. That quantity can then be integrated to calculate production per unit area at the location. Standard names for production per unit area use the term "productivity". Picophytoplankton are phytoplankton of less than 2 micrometers in size. Phytoplankton are autotrophic prokaryotic or eukaryotic algae that live near the water surface where there is sufficient light to support photosynthesis. "tendency_of_X" means derivative of X with respect to time. + The phrase "tendency_of_X" means derivative of X with respect to time. Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction "mole_concentration_of_X_in_Y", where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The phrase "expressed_as" is used in the construction A_expressed_as_B, where B is a chemical constituent of A. It means that the quantity indicated by the standard name is calculated solely with respect to the B contained in A, neglecting all other chemical constituents of A. 
The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Production of carbon" means the production of biomass expressed as the mass of carbon which it contains. Net primary production is the excess of gross primary production (the rate of synthesis of biomass from inorganic precursors) by autotrophs ("producers"), for example, photosynthesis in plants or phytoplankton, over the rate at which the autotrophs themselves respire some of this biomass. In the oceans, carbon production per unit volume is often found at a number of depths at a given horizontal location. That quantity can then be integrated to calculate production per unit area at the location. Standard names for production per unit area use the term "productivity". Picophytoplankton are phytoplankton of less than 2 micrometers in size. Phytoplankton are algae that grow where there is sufficient light to support photosynthesis. @@ -27655,7 +30630,7 @@ final_air_pressure_of_lifted_parcel should be specified to indicate the specific mol m-3 s-1 - 'Mole concentration' means number of moles per unit volume, also called"molarity", and is used in the construction mole_concentration_of_X_in_Y, whereX is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as 'nitrogen' or a phrase such as 'nox_expressed_as_nitrogen'. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "tendency_of_X" means derivative of X with respect to time. + The phrase "tendency_of_X" means derivative of X with respect to time. Mole concentration means number of moles per unit volume, also called "molarity", and is used in the construction mole_concentration_of_X_in_Y, where X is a material constituent of Y. A chemical or biological species denoted by X may be described by a single term such as "nitrogen" or a phrase such as "nox_expressed_as_nitrogen". The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. @@ -27739,28 +30714,28 @@ final_air_pressure_of_lifted_parcel should be specified to indicate the specific mol m-2 s-1 - "Content" indicates a quantity per unit area. Runoff is the liquid water which drains from land. If not specified, "runoff" refers to the sum of surface runoff and subsurface drainage. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of termswhich together compose the general quantity named by omitting the phrase. "tendency_of_X" means derivative of X with respect to time. + The phrase "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "ocean content" of a quantity refers to the vertical integral from the surface to the bottom of the ocean. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. Runoff is the liquid water which drains from land. If not specified, "runoff" refers to the sum of surface runoff and subsurface drainage.
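The "due_to_" convention running through these entries composes a budget: each named process contributes one term, and the name with the phrase omitted denotes the sum. A minimal sketch, with process names and magnitudes assumed purely for illustration:

    # Assumed per-process tendencies of an ocean content quantity (mol m-2 s-1),
    # loosely echoing the processes named in the surrounding entries.
    tendency_due_to = {
        "deposition": 2.0e-9,
        "nitrogen_fixation": 1.5e-9,
        "runoff_and_drainage": 0.5e-9,
        "denitrification_and_sedimentation": -1.2e-9,
    }

    # Omitting the "due_to_" phrase names the sum of all such terms.
    total_tendency = sum(tendency_due_to.values())  # mol m-2 s-1
    print(f"total tendency: {total_tendency:.2e} mol m-2 s-1")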
mol m-2 s-1 - "Content" indicates a quantity per unit area. The specification of a physical process by the phrase due_to_process means that the quantity named is asingle term in a sum of terms which together compose the general quantity named by omitting the phrase. "tendency_of_X" means derivative of X with respect to time. + The phrase "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "ocean content" of a quantity refers to the vertical integral from the surface to the bottom of the ocean. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Sedimentation" is the sinking of particulate matter to the floor of a body of water. mol m-2 s-1 - "Content" indicates a quantity per unit area. "tendency_of_X" means derivative of X with respect to time. "Dissolved inorganic carbon" describes a family of chemical species in solution, including carbon dioxide, carbonic acid and the carbonate and bicarbonate anions. "Dissolved inorganic carbon" isthe term used in standard names for all species belonging to the family that are represented within a given model. The list of individual species that are included in a quantity having a group chemical standard name can vary between models. Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute. + The phrase "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "ocean content" of a quantity refers to the vertical integral from the surface to the bottom of the ocean. "Dissolved inorganic carbon" describes a family of chemical species in solution, including carbon dioxide, carbonic acid and the carbonate and bicarbonate anions. "Dissolved inorganic carbon" is the term used in standard names for all species belonging to the family that are represented within a given model. The list of individual species that are included in a quantity having a group chemical standard name can vary between models. Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute. mol m-2 s-1 - "Content" indicates a quantity per unit area. The specification of a physical process by the phrase due_to_process means that the quantity named is asingle term in a sum of terms which together compose the general quantity named by omitting the phrase. "tendency_of_X" means derivative of X with respect to time. "Dissolved inorganic carbon" describes a family of chemical species in solution, including carbon dioxide, carbonic acid and the carbonate and bicarbonate anions. "Dissolved inorganic carbon" is the term used in standard names for all species belonging to the family that are represented within a given model. The list of individual species that are included in a quantity having a group chemical standard name can vary between models. Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute. + The phrase "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "ocean content" of a quantity refers to the vertical integral from the surface to the bottom of the ocean. 
The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Dissolved inorganic carbon" describes a family of chemical species in solution, including carbon dioxide, carbonic acid and the carbonate and bicarbonate anions. "Dissolved inorganic carbon" is the term used in standard names for all species belonging to the family that are represented within a given model. The list of individual species that are included in a quantity having a group chemical standard name can vary between models. Where possible, the data variable should be accompanied by a complete description of the species represented, for example, by using a comment attribute. @@ -27774,7 +30749,7 @@ final_air_pressure_of_lifted_parcel should be specified to indicate the specific mol m-2 s-1 - "Content" indicates a quantity per unit area. The specification of a physical process by the phrase due_to_process means that the quantity named is asingle term in a sum of terms which together compose the general quantity named by omitting the phrase. "tendency_of_X" means derivative of X with respect to time. "Dissolved inorganic iron" means iron ions, in oxidation states of both Fe2+ and Fe3+, in solution. + The phrase "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "ocean content" of a quantity refers to the vertical integral from the surface to the bottom of the ocean. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Dissolved inorganic iron" means iron ions, in oxidation states of both Fe2+ and Fe3+, in solution. @@ -27823,21 +30798,21 @@ final_air_pressure_of_lifted_parcel should be specified to indicate the specific mol m-2 s-1 - The phrase "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "ocean content" of a quantity refers to the vertical integral from the surface to the bottom of the ocean. The specification of a physical process by the phrase due_to_process means that the quantity named is asingle term in a sum of terms which together compose the general quantity named by omitting the phrase. 'Denitrification' is the conversion of nitrate into gaseous compounds such as nitric oxide, nitrous oxide and molecular nitrogen which are then emitted to the atmosphere. 'Sedimentation' is the sinking of particulate matter to the floor of a body of water. + The phrase "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "ocean content" of a quantity refers to the vertical integral from the surface to the bottom of the ocean. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Denitrification" is the conversion of nitrate into gaseous compounds such as nitric oxide, nitrous oxide and molecular nitrogen which are then emitted to the atmosphere. "Sedimentation" is the sinking of particulate matter to the floor of a body of water. mol m-2 s-1 - "Content" indicates a quantity per unit area. 
The specification of a physical process by the phrase due_to_process means that the quantity named is asingle term in a sum of terms which together compose the general quantity named by omitting the phrase. Deposition of nitrogen into the ocean is the sum of dry and wet depositionof nitrogen species onto the ocean surface from the atmosphere. 'Nitrogen fixation' means the production of ammonia from nitrogen gas. Organisms that fix nitrogen are termed 'diazotrophs'. Diazotrophic phytoplankton can fix atmospheric nitrogen, thus increasing the content of nitrogen in the ocean. Runoff is the liquid water which drains from land. If not specified, "runoff" refers to the sum of surface runoff and subsurface drainage."tendency_of_X" means derivative of X with respect to time. + The phrase "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "ocean content" of a quantity refers to the vertical integral from the surface to the bottom of the ocean. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. Deposition of nitrogen into the ocean is the sum of dry and wet deposition of nitrogen species onto the ocean surface from the atmosphere. "Nitrogen fixation" means the production of ammonia from nitrogen gas. Organisms that fix nitrogen are termed "diazotrophs". Diazotrophic phytoplankton can fix atmospheric nitrogen, thus increasing the content of nitrogen in the ocean. Runoff is the liquid water which drains from land. If not specified, "runoff" refers to the sum of surface runoff and subsurface drainage. mol m-2 s-1 - "Content" indicates a quantity per unit area. The specification of a physical process by the phrase due_to_process means that the quantity named is asingle term in a sum of terms which together compose the general quantity named by omitting the phrase. 'Nitrogen fixation' means the production of ammonia from nitrogen gas. Organisms that fix nitrogen are termed 'diazotrophs'. Diazotrophic phytoplankton can fix atmospheric nitrogen, thus increasing the content of nitrogen in the ocean. "tendency_of_X" means derivative of X with respect to time. + The phrase "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "ocean content" of a quantity refers to the vertical integral from the surface to the bottom of the ocean. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Nitrogen fixation" means the production of ammonia from nitrogen gas. Organisms that fix nitrogen are termed "diazotrophs". Diazotrophic phytoplankton can fix atmospheric nitrogen, thus increasing the content of nitrogen in the ocean. @@ -27872,7 +30847,7 @@ final_air_pressure_of_lifted_parcel should be specified to indicate the specific mol m-2 s-1 - "Content" indicates a quantity per unit area. Runoff is the liquid water which drains from land. If not specified, "runoff" refers to the sum of surface runoff and subsurface drainage. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of termswhich together compose the general quantity named by omitting the phrase. "tendency_of_X" means derivative of X with respect to time. 
+ The phrase "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "ocean content" of a quantity refers to the vertical integral from the surface to the bottom of the ocean. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. Runoff is the liquid water which drains from land. If not specified, "runoff" refers to the sum of surface runoff and subsurface drainage. @@ -27956,7 +30931,7 @@ final_air_pressure_of_lifted_parcel should be specified to indicate the specific mol m-2 s-1 - "Content" indicates a quantity per unit area. The specification of a physical process by the phrase due_to_process means that the quantity named is asingle term in a sum of terms which together compose the general quantity named by omitting the phrase. "tendency_of_X" means derivative of X with respect to time. + The phrase "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The "ocean content" of a quantity refers to the vertical integral from the surface to the bottom of the ocean. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. @@ -28068,21 +31043,21 @@ final_air_pressure_of_lifted_parcel should be specified to indicate the specific s-1 - The phrase "tendency_of_X" means derivative of X with respect to time. "X_area_fraction" means the fraction of horizontal area occupied by X. Sea ice area fraction is area of the sea surface occupied by sea ice. It is also called "sea ice concentration". "Sea ice" means all ice floating in the sea which has formed from freezing sea water, rather than by other processes such as calving of land ice to form icebergs. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Sea ice dynamics" refers to the motion of sea ice. + The phrase "tendency_of_X" means derivative of X with respect to time. "Area fraction" is the fraction of a grid cell's horizontal area that has some characteristic of interest. It is evaluated as the area of interest divided by the grid cell area. Sea ice area fraction is area of the sea surface occupied by sea ice. It is also called "sea ice concentration". "Sea ice" means all ice floating in the sea which has formed from freezing sea water, rather than by other processes such as calving of land ice to form icebergs. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Sea ice dynamics" refers to the motion of sea ice. s-1 - The phrase "tendency_of_X" means derivative of X with respect to time. "X_area_fraction" means the fraction of horizontal area occupied by X. Sea ice area fraction is area of the sea surface occupied by sea ice. It is also called "sea ice concentration". "Sea ice" means all ice floating in the sea which has formed from freezing sea water, rather than by other processes such as calving of land ice to form icebergs. 
The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. Sea ice "ridging" occurs in rough sea conditions. The motion of the sea surface can cause areas of sea ice to deform and fold resulting in ridged upper and lower surfaces. The ridges can be as much as twenty metres thick if thick ice is deformed. + The phrase "tendency_of_X" means derivative of X with respect to time. "Area fraction" is the fraction of a grid cell's horizontal area that has some characteristic of interest. It is evaluated as the area of interest divided by the grid cell area. Sea ice area fraction is area of the sea surface occupied by sea ice. It is also called "sea ice concentration". "Sea ice" means all ice floating in the sea which has formed from freezing sea water, rather than by other processes such as calving of land ice to form icebergs. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. Sea ice "ridging" occurs in rough sea conditions. The motion of the sea surface can cause areas of sea ice to deform and fold resulting in ridged upper and lower surfaces. The ridges can be as much as twenty metres thick if thick ice is deformed. s-1 - The phrase "tendency_of_X" means derivative of X with respect to time. "X_area_fraction" means the fraction of horizontal area occupied by X. Sea ice area fraction is area of the sea surface occupied by sea ice. It is also called "sea ice concentration". "Sea ice" means all ice floating in the sea which has formed from freezing sea water, rather than by other processes such as calving of land ice to form icebergs. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Sea ice thermodynamics" refers to the addition or subtraction of mass due to surface and basal fluxes. + The phrase "tendency_of_X" means derivative of X with respect to time. "Area fraction" is the fraction of a grid cell's horizontal area that has some characteristic of interest. It is evaluated as the area of interest divided by the grid cell area. Sea ice area fraction is area of the sea surface occupied by sea ice. It is also called "sea ice concentration". "Sea ice" means all ice floating in the sea which has formed from freezing sea water, rather than by other processes such as calving of land ice to form icebergs. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Sea ice thermodynamics" refers to the addition or subtraction of mass due to surface and basal fluxes. @@ -28110,7 +31085,7 @@ final_air_pressure_of_lifted_parcel should be specified to indicate the specific mol m-3 s-1 - 'sea_water_alkalinity_expressed_as_mole_equivalent' is the total alkalinity equivalent concentration (including carbonate, nitrogen, silicate, and borate components). The specification of a physical process by the phrase due_to_process means that thequantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase.
"tendency_of_X" means derivative of X withrespect to time. + The phrase "tendency_of_X" means derivative of X with respect to time. sea_water_alkalinity_expressed_as_mole_equivalent is the total alkalinity equivalent concentration (including carbonate, nitrogen, silicate, and borate components). The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. @@ -28120,18 +31095,18 @@ final_air_pressure_of_lifted_parcel should be specified to indicate the specific The phrase "tendency_of_X" means derivative of X with respect to time. This tendency encompasses all processes that impact on the time changes for the heat content within a grid cell. The phrase "expressed_as_heat_content" means that this quantity is calculated as the specific heat capacity times density of sea water multiplied by the conservative temperature of the sea water in the grid cell and integrated over depth. If used for a layer heat content, coordinate bounds should be used to define the extent of the layers. If no coordinate bounds are specified, it is assumed that the integral is calculated over the entire vertical extent of the medium, e.g, if the medium is sea water the integral is assumed to be calculated over the full depth of the ocean. Conservative Temperature is defined as part of the Thermodynamic Equation of Seawater 2010 (TEOS-10) which was adopted in 2010 by the International Oceanographic Commission (IOC). Conservative Temperature is specific potential enthalpy (which has the standard name sea_water_specific_potential_enthalpy) divided by a fixed value of the specific heat capacity of sea water, namely cp_0 = 3991.86795711963 J kg-1 K-1. Conservative Temperature is a more accurate measure of the "heat content" of sea water, by a factor of one hundred, than is potential temperature. Because of this, it can be regarded as being proportional to the heat content of sea water per unit mass. Reference: www.teos-10.org; McDougall, 2003 doi: 10.1175/1520-0485(2003)033<0945:PEACOV>2.0.CO;2. - + W m-2 - The phrase "tendency_of_X" means derivative of X with respect to time. The phrase "expressed_as_heat_content" means that this quantity is calculated as the specific heat capacity times density of sea water multiplied by the conservative temperature of the sea water in the grid cell and integrated over depth. If used for a layer heat content, coordinate bounds should be used to define the extent of the layers. If no coordinate bounds are specified, it is assumed that the integral is calculated over the entire vertical extent of the medium, e.g, if the medium is sea water the integral is assumed to be calculated over the full depth of the ocean. Conservative Temperature is defined as part of the Thermodynamic Equation of Seawater 2010 (TEOS-10) which was adopted in 2010 by the International Oceanographic Commission (IOC). Conservative Temperature is specific potential enthalpy (which has the standard name sea_water_specific_potential_enthalpy) divided by a fixed value of the specific heat capacity of sea water, namely cp_0 = 3991.86795711963 J kg-1 K-1. Conservative Temperature is a more accurate measure of the "heat content" of sea water, by a factor of one hundred, than is potential temperature. Because of this, it can be regarded as being proportional to the heat content of sea water per unit mass. 
Reference: www.teos-10.org; McDougall, 2003 doi: 10.1175/1520-0485(2003)033<0945:PEACOV>2.0.CO;2. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. Parameterized eddy advection in an ocean model means the part due to a scheme representing parameterized eddy-induced advective effects not included in the resolved model velocity field. Parameterized eddy advection can be represented on various spatial scales and there are standard names for parameterized_mesoscale_eddy_advection and parameterized_submesoscale_eddy_advection which both contribute to the total parameterized eddy advection. Additionally, when the parameterized advective process is represented in the model as a skew-diffusion rather than an advection, then the parameterized skew diffusion should be included in this diagnostic. The convergence of a skew-flux is identical (in the continuous formulation) to the convergence of an advective flux, making their tendencies the same. + The phrase "tendency_of_X" means derivative of X with respect to time. The phrase "expressed_as_heat_content" means that this quantity is calculated as the specific heat capacity times density of sea water multiplied by the conservative temperature of the sea water in the grid cell and integrated over depth. If used for a layer heat content, coordinate bounds should be used to define the extent of the layers. If no coordinate bounds are specified, it is assumed that the integral is calculated over the entire vertical extent of the medium, e.g, if the medium is sea water the integral is assumed to be calculated over the full depth of the ocean. Conservative Temperature is defined as part of the Thermodynamic Equation of Seawater 2010 (TEOS-10) which was adopted in 2010 by the International Oceanographic Commission (IOC). Conservative Temperature is specific potential enthalpy (which has the standard name sea_water_specific_potential_enthalpy) divided by a fixed value of the specific heat capacity of sea water, namely cp_0 = 3991.86795711963 J kg-1 K-1. Conservative Temperature is a more accurate measure of the "heat content" of sea water, by a factor of one hundred, than is potential temperature. Because of this, it can be regarded as being proportional to the heat content of sea water per unit mass. Reference: www.teos-10.org; McDougall, 2003 doi: 10.1175/1520-0485(2003)033<0945:PEACOV>2.0.CO;2. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Dianeutral mixing" means mixing across surfaces of neutral buoyancy. "Parameterized" means the part due to a scheme representing processes which are not explicitly resolved by the model. - + W m-2 - The phrase "tendency_of_X" means derivative of X with respect to time. The phrase "expressed_as_heat_content" means that this quantity is calculated as the specific heat capacity times density of sea water multiplied by the conservative temperature of the sea water in the grid cell and integrated over depth. If used for a layer heat content, coordinate bounds should be used to define the extent of the layers. 
If no coordinate bounds are specified, it is assumed that the integral is calculated over the entire vertical extent of the medium, e.g, if the medium is sea water the integral is assumed to be calculated over the full depth of the ocean. Conservative Temperature is defined as part of the Thermodynamic Equation of Seawater 2010 (TEOS-10) which was adopted in 2010 by the International Oceanographic Commission (IOC). Conservative Temperature is specific potential enthalpy (which has the standard name sea_water_specific_potential_enthalpy) divided by a fixed value of the specific heat capacity of sea water, namely cp_0 = 3991.86795711963 J kg-1 K-1. Conservative Temperature is a more accurate measure of the "heat content" of sea water, by a factor of one hundred, than is potential temperature. Because of this, it can be regarded as being proportional to the heat content of sea water per unit mass. Reference: www.teos-10.org; McDougall, 2003 doi: 10.1175/1520-0485(2003)033<0945:PEACOV>2.0.CO;2. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Eddy dianeutral mixing" means dianeutral mixing, i.e. mixing across neutral directions caused by the unresolved turbulent motion of eddies of all types (e.g., breaking gravity waves, boundary layer turbulence, etc.). + The phrase "tendency_of_X" means derivative of X with respect to time. The phrase "expressed_as_heat_content" means that this quantity is calculated as the specific heat capacity times density of sea water multiplied by the conservative temperature of the sea water in the grid cell and integrated over depth. If used for a layer heat content, coordinate bounds should be used to define the extent of the layers. If no coordinate bounds are specified, it is assumed that the integral is calculated over the entire vertical extent of the medium, e.g, if the medium is sea water the integral is assumed to be calculated over the full depth of the ocean. Conservative Temperature is defined as part of the Thermodynamic Equation of Seawater 2010 (TEOS-10) which was adopted in 2010 by the International Oceanographic Commission (IOC). Conservative Temperature is specific potential enthalpy (which has the standard name sea_water_specific_potential_enthalpy) divided by a fixed value of the specific heat capacity of sea water, namely cp_0 = 3991.86795711963 J kg-1 K-1. Conservative Temperature is a more accurate measure of the "heat content" of sea water, by a factor of one hundred, than is potential temperature. Because of this, it can be regarded as being proportional to the heat content of sea water per unit mass. Reference: www.teos-10.org; McDougall, 2003 doi: 10.1175/1520-0485(2003)033<0945:PEACOV>2.0.CO;2. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. Parameterized eddy advection in an ocean model means the part due to a scheme representing parameterized eddy-induced advective effects not included in the resolved model velocity field. Parameterized eddy advection can be represented on various spatial scales and there are standard names for parameterized_mesoscale_eddy_advection and parameterized_submesoscale_eddy_advection which both contribute to the total parameterized eddy advection. 
Additionally, when the parameterized advective process is represented in the model as a skew-diffusion rather than an advection, then the parameterized skew diffusion should be included in this diagnostic. The convergence of a skew-flux is identical (in the continuous formulation) to the convergence of an advective flux, making their tendencies the same. @@ -28152,7 +31127,7 @@ final_air_pressure_of_lifted_parcel should be specified to indicate the specific W m-2 - The phrase "tendency_of_X" means derivative of X with respect to time. The phrase "expressed_as_heat_content" means that this quantity is calculated as the specific heat capacity times density of sea water multiplied by the conservative temperature of the sea water in the grid cell and integrated over depth. If used for a layer heat content, coordinate bounds should be used to define the extent of the layers. If no coordinate bounds are specified, it is assumed that the integral is calculated over the entire vertical extent of the medium, e.g, if the medium is sea water the integral is assumed to be calculated over the full depth of the ocean. Conservative Temperature is defined as part of the Thermodynamic Equation of Seawater 2010 (TEOS-10) which was adopted in 2010 by the International Oceanographic Commission (IOC). Conservative Temperature is specific potential enthalpy (which has the standard name sea_water_specific_potential_enthalpy) divided by a fixed value of the specific heat capacity of sea water, namely cp_0 = 3991.86795711963 J kg-1 K-1. Conservative Temperature is a more accurate measure of the "heat content" of sea water, by a factor of one hundred, than is potential temperature. Because of this, it can be regarded as being proportional to the heat content of sea water per unit mass. Reference: www.teos-10.org; McDougall, 2003 doi: 10.1175/1520-0485(2003)033<0945:PEACOV>2.0.CO;2. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. Parameterized eddy advection in an ocean model means the part due to a scheme representing parameterized eddy-induced advective effects not included in the resolved model velocity field. Parameterized submesoscale eddy advection occurs on a spatial scale of the order of 1 km horizontally. Reference: James C. McWilliams 2016, Submesoscale currents in the ocean, Proceedings of the Royal Society A: Mathematical, Physical and Engineering Sciences, volume 472, issue 2189. DOI: 10.1098/rspa.2016.0117. There are also standard names for parameterized_mesoscale_eddy_advection which, along with parameterized_submesoscale_eddy_advection, contributes to the total parameterized eddy advection. Additionally, when the parameterized advective process is represented in the model as a skew-diffusion rather than an advection, then the parameterized skew diffusion should be included in this diagnostic. The convergence of a skew-flux is identical (in the continuous formulation) to the convergence of an advective flux, making their tendencies the same. + The phrase "tendency_of_X" means derivative of X with respect to time. The phrase "expressed_as_heat_content" means that this quantity is calculated as the specific heat capacity times density of sea water multiplied by the conservative temperature of the sea water in the grid cell and integrated over depth. 
If used for a layer heat content, coordinate bounds should be used to define the extent of the layers. If no coordinate bounds are specified, it is assumed that the integral is calculated over the entire vertical extent of the medium, e.g, if the medium is sea water the integral is assumed to be calculated over the full depth of the ocean. Conservative Temperature is defined as part of the Thermodynamic Equation of Seawater 2010 (TEOS-10) which was adopted in 2010 by the International Oceanographic Commission (IOC). Conservative Temperature is specific potential enthalpy (which has the standard name sea_water_specific_potential_enthalpy) divided by a fixed value of the specific heat capacity of sea water, namely cp_0 = 3991.86795711963 J kg-1 K-1. Conservative Temperature is a more accurate measure of the "heat content" of sea water, by a factor of one hundred, than is potential temperature. Because of this, it can be regarded as being proportional to the heat content of sea water per unit mass. Reference: www.teos-10.org; McDougall, 2003 doi: 10.1175/1520-0485(2003)033<0945:PEACOV>2.0.CO;2. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. Parameterized eddy advection in an ocean model means the part due to a scheme representing parameterized eddy-induced advective effects not included in the resolved model velocity field. Parameterized submesoscale eddy advection occurs on a spatial scale of the order of 1 km horizontally. Reference: James C. McWilliams 2016, Submesoscale currents in the ocean, Proceedings of the Royal Society A: Mathematical, Physical and Engineering Sciences, volume 472, issue 2189. DOI: 10.1098/rspa.2016.0117. There are also standard names for parameterized_mesoscale_eddy_advection which, along with parameterized_submesoscale_eddy_advection, contributes to the total parameterized eddy advection. Additionally, when the parameterized advective process is represented in the model as a skew-diffusion rather than an advection, then the parameterized skew diffusion should be included in this diagnostic. The convergence of a skew-flux is identical (in the continuous formulation) to the convergence of an advective flux, making their tendencies the same. @@ -28169,18 +31144,18 @@ final_air_pressure_of_lifted_parcel should be specified to indicate the specific The phrase "tendency_of_X" means derivative of X with respect to time. This tendency encompasses all processes that impact on the time changes for the heat content within a grid cell. The phrase "expressed_as_heat_content" means that this quantity is calculated as the specific heat capacity times density of sea water multiplied by the potential temperature of the sea water in the grid cell and integrated over depth. If used for a layer heat content, coordinate bounds should be used to define the extent of the layers. If no coordinate bounds are specified, it is assumed that the integral is calculated over the entire vertical extent of the medium, e.g, if the medium is sea water the integral is assumed to be calculated over the full depth of the ocean. Potential temperature is the temperature a parcel of air or sea water would have if moved adiabatically to sea level pressure. - + W m-2 - The phrase "tendency_of_X" means derivative of X with respect to time. 
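As a concrete reading of the "expressed_as_heat_content" phrase, the quantity is the depth integral of specific heat capacity times density times temperature. A minimal NumPy sketch, assuming 1-D profiles on depth levels and an optional pair of layer bounds (all names are illustrative):

    import numpy as np

    def heat_content(cp, density, temperature, depth, bounds=None):
        # Heat content (J m-2): integral over depth of cp * rho * T.
        # depth is positive downwards (m); bounds = (top, bottom) selects a layer.
        if bounds is not None:
            keep = (depth >= bounds[0]) & (depth <= bounds[1])
            density, temperature, depth = density[keep], temperature[keep], depth[keep]
        return np.trapz(cp * density * temperature, depth)

When no bounds are given the whole profile is integrated, matching the "entire vertical extent of the medium" default described above.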
The phrase "expressed_as_heat_content" means that this quantity is calculated as the specific heat capacity times density of sea water multiplied by the potential temperature of the sea water in the grid cell and integrated over depth. If used for a layer heat content, coordinate bounds should be used to define the extent of the layers. If no coordinate bounds are specified, it is assumed that the integral is calculated over the entire vertical extent of the medium, e.g, if the medium is sea water the integral is assumed to be calculated over the full depth of the ocean. Potential temperature is the temperature a parcel of air or sea water would have if moved adiabatically to sea level pressure. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. Parameterized eddy advection in an ocean model means the part due to a scheme representing parameterized eddy-induced advective effects not included in the resolved model velocity field. Parameterized eddy advection can be represented on various spatial scales and there are standard names for parameterized_mesoscale_eddy_advection and parameterized_submesoscale_eddy_advection which both contribute to the total parameterized eddy advection. Additionally, when the parameterized advective process is represented in the model as a skew-diffusion rather than an advection, then the parameterized skew diffusion should be included in this diagnostic. The convergence of a skew-flux is identical (in the continuous formulation) to the convergence of an advective flux, making their tendencies the same. + The phrase "tendency_of_X" means derivative of X with respect to time. The phrase "expressed_as_heat_content" means that this quantity is calculated as the specific heat capacity times density of sea water multiplied by the potential temperature of the sea water in the grid cell and integrated over depth. If used for a layer heat content, coordinate bounds should be used to define the extent of the layers. If no coordinate bounds are specified, it is assumed that the integral is calculated over the entire vertical extent of the medium, e.g, if the medium is sea water the integral is assumed to be calculated over the full depth of the ocean. Potential temperature is the temperature a parcel of air or sea water would have if moved adiabatically to sea level pressure. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Dianeutral mixing" means mixing across surfaces of neutral buoyancy. "Parameterized" means the part due to a scheme representing processes which are not explicitly resolved by the model. - + W m-2 - The phrase "tendency_of_X" means derivative of X with respect to time. The phrase "expressed_as_heat_content" means that this quantity is calculated as the specific heat capacity times density of sea water multiplied by the potential temperature of the sea water in the grid cell and integrated over depth. If used for a layer heat content, coordinate bounds should be used to define the extent of the layers. If no coordinate bounds are specified, it is assumed that the integral is calculated over the entire vertical extent of the medium, e.g, if the medium is sea water the integral is assumed to be calculated over the full depth of the ocean. 
Potential temperature is the temperature a parcel of air or sea water would have if moved adiabatically to sea level pressure. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Eddy dianeutral mixing" means dianeutral mixing, i.e. mixing across neutral directions caused by the unresolved turbulent motion of eddies of all types (e.g., breaking gravity waves, boundary layer turbulence, etc.). + The phrase "tendency_of_X" means derivative of X with respect to time. The phrase "expressed_as_heat_content" means that this quantity is calculated as the specific heat capacity times density of sea water multiplied by the potential temperature of the sea water in the grid cell and integrated over depth. If used for a layer heat content, coordinate bounds should be used to define the extent of the layers. If no coordinate bounds are specified, it is assumed that the integral is calculated over the entire vertical extent of the medium, e.g, if the medium is sea water the integral is assumed to be calculated over the full depth of the ocean. Potential temperature is the temperature a parcel of air or sea water would have if moved adiabatically to sea level pressure. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. Parameterized eddy advection in an ocean model means the part due to a scheme representing parameterized eddy-induced advective effects not included in the resolved model velocity field. Parameterized eddy advection can be represented on various spatial scales and there are standard names for parameterized_mesoscale_eddy_advection and parameterized_submesoscale_eddy_advection which both contribute to the total parameterized eddy advection. Additionally, when the parameterized advective process is represented in the model as a skew-diffusion rather than an advection, then the parameterized skew diffusion should be included in this diagnostic. The convergence of a skew-flux is identical (in the continuous formulation) to the convergence of an advective flux, making their tendencies the same. @@ -28201,7 +31176,7 @@ final_air_pressure_of_lifted_parcel should be specified to indicate the specific W m-2 - The phrase "tendency_of_X" means derivative of X with respect to time. The phrase "expressed_as_heat_content" means that this quantity is calculated as the specific heat capacity times density of sea water multiplied by the potential temperature of the sea water in the grid cell and integrated over depth. If used for a layer heat content, coordinate bounds should be used to define the extent of the layers. If no coordinate bounds are specified, it is assumed that the integral is calculated over the entire vertical extent of the medium, e.g, if the medium is sea water the integral is assumed to be calculated over the full depth of the ocean. Potential temperature is the temperature a parcel of air or sea water would have if moved adiabatically to sea level pressure. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. 
Parameterized eddy advection in an ocean model means the part due to a scheme representing parameterized eddy-induced advective effects not included in the resolved model velocity field. Parameterized submesoscale eddy advection occurs on a spatial scale of the order of 1 km horizontally. Reference: James C. McWilliams 2016, Submesoscale currents in the ocean, Proceedings of the Royal Society A: Mathematical, Physical and Engineering Sciences, volume 472, issue 2189. DOI: 10.1098/rspa.2016.0117. There are also standard names for parameterized_mesoscale_eddy_advection which, along with parameterized_submesoscale_eddy_advection, contributes to the total parameterized eddy advection. Additionally, when the parameterized advective process is represented in the model as a skew-diffusion rather than an advection, then the parameterized skew diffusion should be included in this diagnostic. The convergence of a skew-flux is identical (in the continuous formulation) to the convergence of an advective flux, making their tendencies the same. + The phrase "tendency_of_X" means derivative of X with respect to time. The phrase "expressed_as_heat_content" means that this quantity is calculated as the specific heat capacity times density of sea water multiplied by the potential temperature of the sea water in the grid cell and integrated over depth. If used for a layer heat content, coordinate bounds should be used to define the extent of the layers. If no coordinate bounds are specified, it is assumed that the integral is calculated over the entire vertical extent of the medium, e.g, if the medium is sea water the integral is assumed to be calculated over the full depth of the ocean. Potential temperature is the temperature a parcel of air or sea water would have if moved adiabatically to sea level pressure. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. Parameterized eddy advection in an ocean model means the part due to a scheme representing parameterized eddy-induced advective effects not included in the resolved model velocity field. Parameterized submesoscale eddy advection occurs on a spatial scale of the order of 1 km horizontally. Reference: James C. McWilliams 2016, Submesoscale currents in the ocean, Proceedings of the Royal Society A: Mathematical, Physical and Engineering Sciences, volume 472, issue 2189. DOI: 10.1098/rspa.2016.0117. There are also standard names for parameterized_mesoscale_eddy_advection which, along with parameterized_submesoscale_eddy_advection, contributes to the total parameterized eddy advection. Additionally, when the parameterized advective process is represented in the model as a skew-diffusion rather than an advection, then the parameterized skew diffusion should be included in this diagnostic. The convergence of a skew-flux is identical (in the continuous formulation) to the convergence of an advective flux, making their tendencies the same. @@ -28260,18 +31235,18 @@ final_air_pressure_of_lifted_parcel should be specified to indicate the specific "Content" indicates a quantity per unit area. "tendency_of_X" means derivative of X with respect to time. This tendency encompasses all processes that impact on the time changes for the salt content within a grid cell. - + kg m-2 s-1 - The phrase "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. 
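Since every entry here glosses "tendency_of_X" as the derivative of X with respect to time, such a diagnostic is, numerically, a finite-difference derivative along the time axis. A minimal sketch, assuming a regularly sampled field with time as the leading axis (names illustrative):

    import numpy as np

    def tendency(x, dt):
        # dX/dt by centred differences (one-sided at the ends),
        # in units of X per second.
        return np.gradient(x, dt, axis=0)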
The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. Parameterized eddy advection in an ocean model means the part due to a scheme representing parameterized eddy-induced advective effects not included in the resolved model velocity field. Parameterized eddy advection can be represented on various spatial scales and there are standard names for parameterized_mesoscale_eddy_advection and parameterized_submesoscale_eddy_advection which both contribute to the total parameterized eddy advection. Additionally, when the parameterized advective process is represented in the model as a skew-diffusion rather than an advection, then the parameterized skew diffusion should be included in this diagnostic. The convergence of a skew-flux is identical (in the continuous formulation) to the convergence of an advective flux, making their tendencies the same. + The phrase "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Dianeutral mixing" means mixing across surfaces of neutral buoyancy. "Parameterized" means the part due to a scheme representing processes which are not explicitly resolved by the model. - + kg m-2 s-1 - The phrase "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Eddy dianeutral mixing" means dianeutral mixing, i.e. mixing across neutral directions caused by the unresolved turbulent motion of eddies of all types (e.g., breaking gravity waves, boundary layer turbulence, etc.). + The phrase "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. Parameterized eddy advection in an ocean model means the part due to a scheme representing parameterized eddy-induced advective effects not included in the resolved model velocity field. Parameterized eddy advection can be represented on various spatial scales and there are standard names for parameterized_mesoscale_eddy_advection and parameterized_submesoscale_eddy_advection which both contribute to the total parameterized eddy advection. Additionally, when the parameterized advective process is represented in the model as a skew-diffusion rather than an advection, then the parameterized skew diffusion should be included in this diagnostic. The convergence of a skew-flux is identical (in the continuous formulation) to the convergence of an advective flux, making their tendencies the same. @@ -28292,7 +31267,7 @@ final_air_pressure_of_lifted_parcel should be specified to indicate the specific kg m-2 s-1 - The phrase "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. 
The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. Parameterized eddy advection in an ocean model means the part due to a scheme representing parameterized eddy-induced advective effects not included in the resolved model velocity field. Parameterized submesoscale eddy advection occurs on a spatial scale of the order of 1 km horizontally. Reference: James C. McWilliams 2016, Submesoscale currents in the ocean, Proceedings of the Royal Society A: Mathematical, Physical and Engineering Sciences, volume 472, issue 2189. DOI: 10.1098/rspa.2016.0117. There are also standard names for parameterized_mesoscale_eddy_advection which, along with parameterized_submesoscale_eddy_advection, contributes to the total parameterized eddy advection. Additionally, when the parameterized advective process is represented in the model as a skew-diffusion rather than an advection, then the parameterized skew diffusion should be included in this diagnostic. The convergence of a skew-flux is identical (in the continuous formulation) to the convergence of an advective flux, making their tendencies the same. @@ -28306,35 +31281,35 @@ final_air_pressure_of_lifted_parcel should be specified to indicate the specific K s-1 - "tendency_of_X" means derivative of X with respect to time. Sea water temperature is the in situ temperature of the sea water. For observed data, depending on the period during which the observation was made, the measured in situ temperature was recorded against standard "scales". These historical scales include the International Practical Temperature Scale of 1948 (IPTS-48; 1948-1967), the International Practical Temperature Scale of 1968 (IPTS-68, Barber, 1969; 1968-1989) and the International Temperature Scale of 1990 (ITS-90, Saunders 1990; 1990 onwards). Conversion of data between these scales follows t68 = t48 - (4.4 x 10e-6) * t48(100 - t48); t90 = 0.99976 * t68. Observations made prior to 1948 (IPTS-48) have not been documented and therefore a conversion cannot be certain.
Differences between t90 and t68 can be up to 0.01 at temperatures of 40 C and above; differences of 0.002-0.007 occur across the standard range of ocean temperatures (-10 - 30 C). The International Equation of State of Seawater 1980 (EOS-80, UNESCO, 1981) and the Practical Salinity Scale (PSS-78) were both based on IPTS-68, while the Thermodynamic Equation of Seawater 2010 (TEOS-10) is based on ITS-90. References: Barber, 1969, doi: 10.1088/0026-1394/5/2/001; UNESCO, 1981; Saunders, 1990, WOCE Newsletter, 10, September 1990. + The phrase "tendency_of_X" means derivative of X with respect to time. Sea water temperature is the in situ temperature of the sea water. For observed data, depending on the period during which the observation was made, the measured in situ temperature was recorded against standard "scales". These historical scales include the International Practical Temperature Scale of 1948 (IPTS-48; 1948-1967), the International Practical Temperature Scale of 1968 (IPTS-68, Barber, 1969; 1968-1989) and the International Temperature Scale of 1990 (ITS-90, Saunders 1990; 1990 onwards). Conversion of data between these scales follows t68 = t48 - (4.4 x 10e-6) * t48(100 - t48); t90 = 0.99976 * t68. Observations made prior to 1948 (IPTS-48) have not been documented and therefore a conversion cannot be certain. Differences between t90 and t68 can be up to 0.01 at temperatures of 40 C and above; differences of 0.002-0.007 occur across the standard range of ocean temperatures (-10 - 30 C). The International Equation of State of Seawater 1980 (EOS-80, UNESCO, 1981) and the Practical Salinity Scale (PSS-78) were both based on IPTS-68, while the Thermodynamic Equation of Seawater 2010 (TEOS-10) is based on ITS-90. References: Barber, 1969, doi: 10.1088/0026-1394/5/2/001; UNESCO, 1981; Saunders, 1990, WOCE Newsletter, 10, September 1990. It is strongly recommended that a variable with this standard name should have the attribute units_metadata="temperature: difference", meaning that it refers to temperature differences and implying that the origin of the temperature scale is irrelevant, because it is essential to know whether a temperature is on-scale or a difference in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). K s-1 - "tendency_of_X" means derivative of X with respect to time. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. Sea water temperature is the in situ temperature of the sea water. For observed data, depending on the period during which the observation was made, the measured in situ temperature was recorded against standard "scales". These historical scales include the International Practical Temperature Scale of 1948 (IPTS-48; 1948-1967), the International Practical Temperature Scale of 1968 (IPTS-68, Barber, 1969; 1968-1989) and the International Temperature Scale of 1990 (ITS-90, Saunders 1990; 1990 onwards). Conversion of data between these scales follows t68 = t48 - (4.4 x 10e-6) * t48(100 - t48); t90 = 0.99976 * t68. Observations made prior to 1948 (IPTS-48) have not been documented and therefore a conversion cannot be certain. Differences between t90 and t68 can be up to 0.01 at temperatures of 40 C and above; differences of 0.002-0.007 occur across the standard range of ocean temperatures (-10 - 30 C).
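The scale conversions quoted above translate directly into code. A minimal sketch with temperatures in degrees Celsius, reading the factor written as 4.4 x 10e-6 as 4.4e-6:

    def t48_to_t68(t48):
        # IPTS-48 -> IPTS-68 (Barber, 1969).
        return t48 - 4.4e-6 * t48 * (100.0 - t48)

    def t68_to_t90(t68):
        # IPTS-68 -> ITS-90.
        return 0.99976 * t68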
The International Equation of State of Seawater 1980 (EOS-80, UNESCO, 1981) and the Practical Salinity Scale (PSS-78) were both based on IPTS-68, while the Thermodynamic Equation of Seawater 2010 (TEOS-10) is based on ITS-90. References: Barber, 1969, doi: 10.1088/0026-1394/5/2/001; UNESCO, 1981; Saunders, 1990, WOCE Newsletter, 10, September 1990. + The phrase "tendency_of_X" means derivative of X with respect to time. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. Sea water temperature is the in situ temperature of the sea water. For observed data, depending on the period during which the observation was made, the measured in situ temperature was recorded against standard "scales". These historical scales include the International Practical Temperature Scale of 1948 (IPTS-48; 1948-1967), the International Practical Temperature Scale of 1968 (IPTS-68, Barber, 1969; 1968-1989) and the International Temperature Scale of 1990 (ITS-90, Saunders 1990; 1990 onwards). Conversion of data between these scales follows t68 = t48 - (4.4 x 10e-6) * t48(100 - t48); t90 = 0.99976 * t68. Observations made prior to 1948 (IPTS-48) have not been documented and therefore a conversion cannot be certain. Differences between t90 and t68 can be up to 0.01 at temperatures of 40 C and above; differences of 0.002-0.007 occur across the standard range of ocean temperatures (-10 - 30 C). The International Equation of State of Seawater 1980 (EOS-80, UNESCO, 1981) and the Practical Salinity Scale (PSS-78) were both based on IPTS-68, while the Thermodynamic Equation of Seawater 2010 (TEOS-10) is based on ITS-90. References: Barber, 1969, doi: 10.1088/0026-1394/5/2/001; UNESCO, 1981; Saunders, 1990, WOCE Newsletter, 10, September 1990. It is strongly recommended that a variable with this standard name should have the attribute units_metadata="temperature: difference", meaning that it refers to temperature differences and implying that the origin of the temperature scale is irrelevant, because it is essential to know whether a temperature is on-scale or a difference in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). K s-1 - "tendency_of_X" means derivative of X with respect to time. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Horizontal mixing" means any horizontal transport other than by advection and parameterized eddy advection, usually represented as horizontal diffusion in ocean models. Sea water temperature is the in situ temperature of the sea water. For observed data, depending on the period during which the observation was made, the measured in situ temperature was recorded against standard "scales". These historical scales include the International Practical Temperature Scale of 1948 (IPTS-48; 1948-1967), the International Practical Temperature Scale of 1968 (IPTS-68, Barber, 1969; 1968-1989) and the International Temperature Scale of 1990 (ITS-90, Saunders 1990; 1990 onwards). Conversion of data between these scales follows t68 = t48 - (4.4 x 10e-6) * t48(100 - t48); t90 = 0.99976 * t68. Observations made prior to 1948 (IPTS-48) have not been documented and therefore a conversion cannot be certain.
Differences between t90 and t68 can be up to 0.01 at temperatures of 40 C and above; differences of 0.002-0.007 occur across the standard range of ocean temperatures (-10 - 30 C). The International Equation of State of Seawater 1980 (EOS-80, UNESCO, 1981) and the Practical Salinity Scale (PSS-78) were both based on IPTS-68, while the Thermodynamic Equation of Seawater 2010 (TEOS-10) is based on ITS-90. References: Barber, 1969, doi: 10.1088/0026-1394/5/2/001; UNESCO, 1981; Saunders, 1990, WOCE Newsletter, 10, September 1990. + The phrase "tendency_of_X" means derivative of X with respect to time. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Horizontal mixing" means any horizontal transport other than by advection and parameterized eddy advection, usually represented as horizontal diffusion in ocean models. Sea water temperature is the in situ temperature of the sea water. For observed data, depending on the period during which the observation was made, the measured in situ temperature was recorded against standard "scales". These historical scales include the International Practical Temperature Scale of 1948 (IPTS-48; 1948-1967), the International Practical Temperature Scale of 1968 (IPTS-68, Barber, 1969; 1968-1989) and the International Temperature Scale of 1990 (ITS-90, Saunders 1990; 1990 onwards). Conversion of data between these scales follows t68 = t48 - (4.4 x 10e-6) * t48(100 - t48); t90 = 0.99976 * t68. Observations made prior to 1948 (IPTS-48) have not been documented and therefore a conversion cannot be certain. Differences between t90 and t68 can be up to 0.01 at temperatures of 40 C and above; differences of 0.002-0.007 occur across the standard range of ocean temperatures (-10 - 30 C). The International Equation of State of Seawater 1980 (EOS-80, UNESCO, 1981) and the Practical Salinity Scale (PSS-78) were both based on IPTS-68, while the Thermodynamic Equation of Seawater 2010 (TEOS-10) is based on ITS-90. References: Barber, 1969, doi: 10.1088/0026-1394/5/2/001; UNESCO, 1981; Saunders, 1990, WOCE Newsletter, 10, September 1990. It is strongly recommended that a variable with this standard name should have the attribute units_metadata="temperature: difference", meaning that it refers to temperature differences and implying that the origin of the temperature scale is irrelevant, because it is essential to know whether a temperature is on-scale or a difference in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). K s-1 - "tendency_of_X" means derivative of X with respect to time. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. Parameterized eddy advection in an ocean model means the part due to a scheme representing parameterized eddy-induced advective effects not included in the resolved model velocity field. Parameterized eddy advection can be represented on various spatial scales and there are standard names for parameterized_mesoscale_eddy_advection and parameterized_submesoscale_eddy_advection which both contribute to the total parameterized eddy advection. Sea water temperature is the in situ temperature of the sea water.
For observed data, depending on the period during which the observation was made, the measured in situ temperature was recorded against standard "scales". These historical scales include the International Practical Temperature Scale of 1948 (IPTS-48; 1948-1967), the International Practical Temperature Scale of 1968 (IPTS-68, Barber, 1969; 1968-1989) and the International Temperature Scale of 1990 (ITS-90, Saunders 1990; 1990 onwards). Conversion of data between these scales follows t68 = t48 - (4.4 x 10e-6) * t48(100 - t48); t90 = 0.99976 * t68. Observations made prior to 1948 (IPTS-48) have not been documented and therefore a conversion cannot be certain. Differences between t90 and t68 can be up to 0.01 at temperatures of 40 C and above; differences of 0.002-0.007 occur across the standard range of ocean temperatures (-10 - 30 C). The International Equation of State of Seawater 1980 (EOS-80, UNESCO, 1981) and the Practical Salinity Scale (PSS-78) were both based on IPTS-68, while the Thermodynamic Equation of Seawater 2010 (TEOS-10) is based on ITS-90. References: Barber, 1969, doi: 10.1088/0026-1394/5/2/001; UNESCO, 1981; Saunders, 1990, WOCE Newsletter, 10, September 1990. + The phrase "tendency_of_X" means derivative of X with respect to time. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. Parameterized eddy advection in an ocean model means the part due to a scheme representing parameterized eddy-induced advective effects not included in the resolved model velocity field. Parameterized eddy advection can be represented on various spatial scales and there are standard names for parameterized_mesoscale_eddy_advection and parameterized_submesoscale_eddy_advection which both contribute to the total parameterized eddy advection. Sea water temperature is the in situ temperature of the sea water. For observed data, depending on the period during which the observation was made, the measured in situ temperature was recorded against standard "scales". These historical scales include the International Practical Temperature Scale of 1948 (IPTS-48; 1948-1967), the International Practical Temperature Scale of 1968 (IPTS-68, Barber, 1969; 1968-1989) and the International Temperature Scale of 1990 (ITS-90, Saunders 1990; 1990 onwards). Conversion of data between these scales follows t68 = t48 - (4.4 x 10e-6) * t48(100 - t48); t90 = 0.99976 * t68. Observations made prior to 1948 (IPTS-48) have not been documented and therefore a conversion cannot be certain. Differences between t90 and t68 can be up to 0.01 at temperatures of 40 C and above; differences of 0.002-0.007 occur across the standard range of ocean temperatures (-10 - 30 C). The International Equation of State of Seawater 1980 (EOS-80, UNESCO, 1981) and the Practical Salinity Scale (PSS-78) were both based on IPTS-68, while the Thermodynamic Equation of Seawater 2010 (TEOS-10) is based on ITS-90. References: Barber, 1969, doi: 10.1088/0026-1394/5/2/001; UNESCO, 1981; Saunders, 1990, WOCE Newsletter, 10, September 1990.
It is strongly recommended that a variable with this standard name should have the attribute units_metadata="temperature: difference", meaning that it refers to temperature differences and implying that the origin of the temperature scale is irrelevant, because it is essential to know whether a temperature is on-scale or a difference in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). K s-1 - "tendency_of_X" means derivative of X with respect to time. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Vertical mixing" means any vertical transport other than by advection and parameterized eddy advection, represented by a combination of vertical diffusion, turbulent mixing and convection in ocean models. Sea water temperature is the in situ temperature of the sea water. For observed data, depending on the period during which the observation was made, the measured in situ temperature was recorded against standard "scales". These historical scales include the International Practical Temperature Scale of 1948 (IPTS-48; 1948-1967), the International Practical Temperature Scale of 1968 (IPTS-68, Barber, 1969; 1968-1989) and the International Temperature Scale of 1990 (ITS-90, Saunders 1990; 1990 onwards). Conversion of data between these scales follows t68 = t48 - (4.4 x 10e-6) * t48(100 - t48); t90 = 0.99976 * t68. Observations made prior to 1948 (IPTS-48) have not been documented and therefore a conversion cannot be certain. + The phrase "tendency_of_X" means derivative of X with respect to time. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Vertical mixing" means any vertical transport other than by advection and parameterized eddy advection, represented by a combination of vertical diffusion, turbulent mixing and convection in ocean models. Sea water temperature is the in situ temperature of the sea water. For observed data, depending on the period during which the observation was made, the measured in situ temperature was recorded against standard "scales". These historical scales include the International Practical Temperature Scale of 1948 (IPTS-48; 1948-1967), the International Practical Temperature Scale of 1968 (IPTS-68, Barber, 1969; 1968-1989) and the International Temperature Scale of 1990 (ITS-90, Saunders 1990; 1990 onwards). Conversion of data between these scales follows t68 = t48 - (4.4 x 10e-6) * t48(100 - t48); t90 = 0.99976 * t68. Observations made prior to 1948 (IPTS-48) have not been documented and therefore a conversion cannot be certain.
Differences between t90 and t68 can be up to 0.01 at temperatures of 40 C and above; differences of 0.002-0.007 occur across the standard range of ocean temperatures (-10 - 30 C). The International Equation of State of Seawater 1980 (EOS-80, UNESCO, 1981) and the Practical Salinity Scale (PSS-78) were both based on IPTS-68, while the Thermodynamic Equation of Seawater 2010 (TEOS-10) is based on ITS-90. References: Barber, 1969, doi: 10.1088/0026-1394/5/2/001; UNESCO, 1981; Saunders, 1990, WOCE Newsletter, 10, September 1990. It is strongly recommended that a variable with this standard name should have the attribute units_metadata="temperature: difference", meaning that it refers to temperature differences and implying that the origin of the temperature scale is irrelevant, because it is essential to know whether a temperature is on-scale or a difference in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). @@ -28369,7 +31344,7 @@ final_air_pressure_of_lifted_parcel should be specified to indicate the specific s-1 - The phrase "tendency_of_X" means derivative of X with respect to time. "Specific" means per unit mass. Specific humidity is the mass fraction of water vapor in (moist) air. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Boundary layer mixing" means turbulent motions that transport heat, water, momentum and chemical constituents within the atmospheric boundary layer and affect exchanges between the surface and the atmosphere. The atmospheric boundary layer is typically characterised by a well-mixed sub-cloud layer of order 500 metres, and by a more extended conditionally unstable layer with boundary-layer clouds up to 2 km. (Reference: IPCC Third Assessment Report, Working Group 1: The Scientific Basis, 7.2.2.3, https://www.ipcc.ch/ipccreports/tar/wg1/273.htm). + The phrase "tendency_of_X" means derivative of X with respect to time. "Specific" means per unit mass. Specific humidity is the mass fraction of water vapor in (moist) air. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Boundary layer mixing" means turbulent motions that transport heat, water, momentum and chemical constituents within the atmospheric boundary layer and affect exchanges between the surface and the atmosphere. The atmospheric boundary layer is typically characterised by a well-mixed sub-cloud layer of order 500 metres, and by a more extended conditionally unstable layer with boundary-layer clouds up to 2 km. (Reference: IPCC Third Assessment Report, Working Group 1: The Scientific Basis, 7.2.2.3, https://archive.ipcc.ch/ipccreports/tar/wg1/273.htm). @@ -28404,7 +31379,7 @@ final_air_pressure_of_lifted_parcel should be specified to indicate the specific s-1 - The phrase "tendency_of_X" means derivative of X with respect to time. Specific humidity is the mass fraction of water vapor in (moist) air. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. 
In an atmosphere model, stratiform cloud is that produced by large-scale convergence (not the convection schemes). "Precipitation" in the earth's atmosphere means precipitation of water in all phases. "Boundary layer mixing" means turbulent motions that transport heat, water, momentum and chemical constituents within the atmospheric boundary layer and affect exchanges between the surface and the atmosphere. The atmospheric boundary layer is typically characterised by a well-mixed sub-cloud layer of order 500 metres, and by a more extended conditionally unstable layer with boundary-layer clouds up to 2 km. (Reference: IPCC Third Assessment Report, Working Group 1: The Scientific Basis, 7.2.2.3, https://www.ipcc.ch/ipccreports/tar/wg1/273.htm). + The phrase "tendency_of_X" means derivative of X with respect to time. Specific humidity is the mass fraction of water vapor in (moist) air. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. In an atmosphere model, stratiform cloud is that produced by large-scale convergence (not the convection schemes). "Precipitation" in the earth's atmosphere means precipitation of water in all phases. "Boundary layer mixing" means turbulent motions that transport heat, water, momentum and chemical constituents within the atmospheric boundary layer and affect exchanges between the surface and the atmosphere. The atmospheric boundary layer is typically characterised by a well-mixed sub-cloud layer of order 500 metres, and by a more extended conditionally unstable layer with boundary-layer clouds up to 2 km. (Reference: IPCC Third Assessment Report, Working Group 1: The Scientific Basis, 7.2.2.3, https://archive.ipcc.ch/ipccreports/tar/wg1/273.htm). @@ -28425,35 +31400,35 @@ final_air_pressure_of_lifted_parcel should be specified to indicate the specific kg m-2 s-1 - The surface called "surface" means the lower boundary of the atmosphere. "tendency_of_X" means derivative of X with respect to time. "Amount" means mass per unit area. Surface amount refers to the amount on the ground, excluding that on the plant or vegetation canopy. + The phrase "tendency_of_X" means derivative of X with respect to time. "Amount" means mass per unit area. Surface snow amount refers to the amount on the solid ground or on surface ice cover, but excludes, for example, falling snowflakes and snow on plants. kg m-2 s-1 - The phrase "tendency_of_X" means derivative of X with respect to time. "Amount" means mass per unit area. The phrase "surface_snow" means snow lying on the surface. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Conversion of snow to sea ice" occurs when the mass of snow accumulated on an area of sea ice is sufficient to cause the ice to become mostly submerged. Waves can then wash over the ice and snow surface and freeze into a layer that becomes "snow ice". "Sea ice" means all ice floating in the sea which has formed from freezing sea water, rather than by other processes such as calving of land ice to form icebergs. + The phrase "tendency_of_X" means derivative of X with respect to time. "Amount" means mass per unit area. 
Surface snow amount refers to the amount on the solid ground or on surface ice cover, but excludes, for example, falling snowflakes and snow on plants. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Conversion of snow to sea ice" occurs when the mass of snow accumulated on an area of sea ice is sufficient to cause the ice to become mostly submerged. Waves can then wash over the ice and snow surface and freeze into a layer that becomes "snow ice". "Sea ice" means all ice floating in the sea which has formed from freezing sea water, rather than by other processes such as calving of land ice to form icebergs. kg m-2 s-1 - The phrase "tendency_of_X" means derivative of X with respect to time. "Amount" means mass per unit area. The phrase "surface_snow" means snow lying on the surface. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. The quantity with standard name tendency_of_surface_snow_amount_due_to_drifting is the rate of change of snow amount caused by wind drift of snow into the sea. + The phrase "tendency_of_X" means derivative of X with respect to time. "Amount" means mass per unit area. Surface snow amount refers to the amount on the solid ground or on surface ice cover, but excludes, for example, falling snowflakes and snow on plants. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. kg m-2 s-1 - The phrase "tendency_of_X" means derivative of X with respect to time. "Amount" means mass per unit area. The phrase "surface_snow" means snow lying on the surface. The quantity with standard name tendency_of_surface_snow_amount_due_to_sea_ice_dynamics is the rate of change of snow amount caused by advection of the sea ice upon which the snow is lying. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Sea ice" means all ice floating in the sea which has formed from freezing sea water, rather than by other processes such as calving of land ice to form icebergs. "Sea ice dynamics" refers to advection of sea ice. + The quantity with standard name tendency_of_surface_snow_amount_due_to_sea_ice_dynamics is the rate of change of snow amount caused by advection of the sea ice upon which the snow is lying. The phrase "tendency_of_X" means derivative of X with respect to time. "Amount" means mass per unit area. Surface snow amount refers to the amount on the solid ground or on surface ice cover, but excludes, for example, falling snowflakes and snow on plants. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Sea ice dynamics" refers to advection of sea ice. "Sea ice" means all ice floating in the sea which has formed from freezing sea water, rather than by other processes such as calving of land ice to form icebergs. W m-2 - The phrase "tendency_of_X" means derivative of X with respect to time. 
"Content" indicates a quantity per unit area. Thermal energy is the total vibrational energy, kinetic and potential, of all the molecules and atoms in a substance. The phrase "surface_snow" means snow lying on the surface. The quantity with standard name tendency_of_thermal_energy_content_of_surface_snow_due_to_rainfall_temperature_excess_above_freezing is the heat energy carried by rainfall reaching the surface. It is calculated relative to the heat that would be carried by rainfall reaching the surface at zero degrees Celsius. It is calculated as the product QrainCpTrain, where Qrain is the mass flux of rainfall reaching the surface (kg m-2 s-1), Cp is the specific heat capacity of water and Train is the temperature in degrees Celsius of the rain water reaching the surface. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. + The phrase "tendency_of_X" means derivative of X with respect to time. "Content" indicates a quantity per unit area. Thermal energy is the total vibrational energy, kinetic and potential, of all the molecules and atoms in a substance. Surface snow refers to the snow on the solid ground or on surface ice cover, but excludes, for example, falling snowflakes and snow on plants. The quantity with standard name tendency_of_thermal_energy_content_of_surface_snow_due_to_rainfall_temperature_excess_above_freezing is the heat energy carried by rainfall reaching the surface. It is calculated relative to the heat that would be carried by rainfall reaching the surface at zero degrees Celsius. It is calculated as the product QrainCpTrain, where Qrain is the mass flux of rainfall reaching the surface (kg m-2 s-1), Cp is the specific heat capacity of water and Train is the temperature in degrees Celsius of the rain water reaching the surface. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. @@ -28467,14 +31442,14 @@ final_air_pressure_of_lifted_parcel should be specified to indicate the specific mol s-1 - "tendency_of_X" means derivative of X with respect to time. "troposphere_moles_of_X" means the total number of moles of X contained in the troposphere, i.e, summed over that part of the atmospheric column and over the entire globe. The chemical formula of HCC140a is CH3CCl3. The IUPAC name for HCC140a is 1,1,1-trichloroethane. + The phrase "tendency_of_X" means derivative of X with respect to time. The construction "troposphere_moles_of_X" means the total number of moles of X contained in the troposphere, i.e, summed over that part of the atmospheric column and over the entire globe. The chemical formula of HCC140a, also called methyl chloroform, is CH3CCl3. The IUPAC name for HCC140a is 1,1,1-trichloroethane. mol s-1 - "tendency_of_X" means derivative of X with respect to time. "troposphere_moles_of_X" means the total number of moles of X contained in the troposphere, i.e, summed over that part of the atmospheric column and over the entire globe. The chemical formula of HCFC22 is CHClF2. The IUPAC name for HCFC 22 is chloro-difluoro-methane. + The phrase "tendency_of_X" means derivative of X with respect to time. 
The construction "troposphere_moles_of_X" means the total number of moles of X contained in the troposphere, i.e, summed over that part of the atmospheric column and over the entire globe. The chemical formula of HCFC22 is CHClF2. The IUPAC name for HCFC22 is chloro(difluoro)methane. @@ -28540,18 +31515,25 @@ final_air_pressure_of_lifted_parcel should be specified to indicate the specific The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "tendency_of_X" means derivative of X with respect to time. Speed is the magnitude of velocity. Wind is defined as a two-dimensional (horizontal) air velocity vector, with no vertical component. (Vertical motion in the atmosphere has the standard name upward_air_velocity.) The wind speed is the magnitude of the wind velocity. + + W m-1 K-1 + + + Thermal conductivity is the constant k in the formula q = -k grad T where q is the heat transfer per unit time per unit area of a surface normal to the direction of transfer and grad T is the temperature gradient. Thermal conductivity is a property of the material. It is strongly recommended that a variable with this standard name should have the attribute units_metadata="temperature: difference", meaning that it refers to temperature differences and implying that the origin of the temperature scale is irrelevant, because it is essential to know whether a temperature is on-scale or a difference in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). + + J m-2 - "Content" indicates a quantity per unit area. Thermal energy is the total vibrational energy, kinetic and potential, of all the molecules and atoms in a substance. The surface called "surface" means the lower boundary of the atmosphere. + "Content" indicates a quantity per unit area. Thermal energy is the total vibrational energy, kinetic and potential, of all the molecules and atoms in a substance. Surface snow refers to the snow on the solid ground or on surface ice cover, but excludes, for example, falling snowflakes and snow on plants. - 1 + - "cloud_top" refers to the top of the highest cloud. "Water" means water in all phases. A variable with the standard name of thermodynamic_phase_of_cloud_water_particles_at_cloud_top contains integers which can be translated to strings using flag_values and flag_meanings attributes. Alternatively, the data variable may contain strings which indicate the thermodynamic phase. These strings are standardised. Values must be chosen from the following list: liquid; ice; mixed; clear_sky; super_cooled_liquid_water; unknown. + A variable with the standard name of thermodynamic_phase_of_cloud_water_particles_at_cloud_top contains integers which can be translated to strings using flag_values and flag_meanings attributes. Alternatively, the data variable may contain strings which indicate the thermodynamic phase. These strings are standardised. Values must be chosen from the following list: liquid; ice; mixed; clear_sky; super_cooled_liquid_water; unknown. "Water" means water in all phases. The phrase "cloud_top" refers to the top of the highest cloud. @@ -28610,6 +31592,13 @@ final_air_pressure_of_lifted_parcel should be specified to indicate the specific "Amount" means mass per unit area. The construction thickness_of_[X_]snowfall_amount means the accumulated "depth" of snow which fell i.e. 
the thickness of the layer of snow at its own density. There are corresponding standard names for liquid water equivalent (lwe) thickness. + + m + + + Depth or height of the organic soil horizon (O or H horizons per the World Reference Base soil classification system), measured from the soil surface down to the mineral horizon. Organic layers are commonly composed of a succession of litter of recognizable origin, of partly decomposed litter, and of highly decomposed (humic) organic material. + + m @@ -28638,6 +31627,27 @@ final_air_pressure_of_lifted_parcel should be specified to indicate the specific "Sea surface height" is a time-varying quantity. "Height_above_X" means the vertical distance above the named surface X. "Lowest astronomical tide" describes a local vertical reference based on the lowest water level that can be expected to occur under average meteorological conditions and under any combination of astronomical conditions. The tidal component of sea surface height describes the predicted variability of the sea surface due to astronomic forcing (chiefly lunar and solar cycles) and shallow water resonance of tidal components; for example as generated based on harmonic analysis, or resulting from the application of harmonic tidal series as boundary conditions to a numerical tidal model. + + m + + + "Sea surface height" is a time-varying quantity. "Height_above_X" means the vertical distance above the named surface X. "Mean higher high water" is the arithmetic mean of the higher high water height of each tidal day observed at a station over a Tidal Datum Epoch, which is a period of time that is usually greater than 18.6 years to include a full lunar cycle. Tidal datums in certain regions with anomalous sea level changes may be calculated using a shorter, or modified, Tidal Datum Epoch (e.g. 5 years). To specify the tidal datum epoch to which the quantity applies, provide a scalar coordinate variable with standard name reference_epoch. + + + + m + + + "Sea surface height" is a time-varying quantity. "Height_above_X" means the vertical distance above the named surface X. "Mean lower low water" is the arithmetic mean of the lower low water height of each tidal day observed at a station over a Tidal Datum Epoch, which is a period of time that is usually greater than 18.6 years to include a full lunar cycle. Tidal datums in certain regions with anomalous sea level changes may be calculated using a shorter, or modified, Tidal Datum Epoch (e.g. 5 years). To specify the tidal datum epoch to which the quantity applies, provide a scalar coordinate variable with standard name reference_epoch. + + + + m + + + "Sea surface height" is a time-varying quantity. "Height_above_X" means the vertical distance above the named surface X. "Mean low water springs" describes a local vertical reference based on the time mean of the low water levels during spring tides (the tides each lunar month with the greatest difference between high and low water that happen during full and new moons phases) expected to occur under average meteorological conditions and under any combination of astronomical conditions. The tidal component of sea surface height describes the predicted variability of the sea surface due to astronomic forcing (chiefly lunar and solar cycles) and shallow water resonance of tidal components; for example as generated based on harmonic analysis, or resulting from the application of harmonic tidal series as boundary conditions to a numerical tidal model. 
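The thermal conductivity entry above defines k through q = -k grad T. A minimal 1-D sketch of that relation, assuming a temperature profile sampled at positions x (names illustrative):

    import numpy as np

    def conductive_heat_flux(temperature, x, k):
        # q = -k * dT/dx: heat flux (W m-2) down the temperature gradient;
        # temperature in K at positions x (m), k in W m-1 K-1.
        return -k * np.gradient(temperature, x)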
+ + m @@ -28684,21 +31694,21 @@ final_air_pressure_of_lifted_parcel should be specified to indicate the specific W m-2 - "longwave" means longwave radiation. "toa" means top of atmosphere. Adjusted forcing is the radiative flux change caused by an imposed change in radiative forcing agent (greenhouse gases, aerosol, solar radiation, etc.) after allowance for stratospheric temperature adjustment. + The abbreviation "toa" means top of atmosphere. The term "longwave" means longwave radiation. Adjusted forcing is the radiative flux change caused by an imposed change in radiative forcing agent (greenhouse gases, aerosol, solar radiation, etc.) after allowance for stratospheric temperature adjustment. A positive radiative forcing or radiative effect is equivalent to a downward radiative flux and contributes to a warming of the earth system. W m-2 - "toa" means top of atmosphere. Adjusted forcing is the radiative flux change caused by an imposed change in radiative forcing agent (greenhouse gases, aerosol, solar radiation, etc.) after allowance for stratospheric temperature adjustment. + The abbreviation "toa" means top of atmosphere. Adjusted forcing is the radiative flux change caused by an imposed change in radiative forcing agent (greenhouse gases, aerosol, solar radiation, etc.) after allowance for stratospheric temperature adjustment. A positive radiative forcing or radiative effect is equivalent to a downward radiative flux and contributes to a warming of the earth system. W m-2 - "shortwave" means shortwave radiation. "toa" means top of atmosphere. Adjusted forcing is the radiative flux change caused by an imposed change in radiative forcing agent (greenhouse gases, aerosol, solar radiation, etc.) after allowance for stratospheric temperature adjustment. + The abbreviation "toa" means top of atmosphere. The term "shortwave" means shortwave radiation. Adjusted forcing is the radiative flux change caused by an imposed change in radiative forcing agent (greenhouse gases, aerosol, solar radiation, etc.) after allowance for stratospheric temperature adjustment. A positive radiative forcing or radiative effect is equivalent to a downward radiative flux and contributes to a warming of the earth system. @@ -28712,37 +31722,35 @@ final_air_pressure_of_lifted_parcel should be specified to indicate the specific K - The brightness temperature of a body is the temperature of a black body which radiates the same power per unit solid angle per unit area. "toa" means top of atmosphere. + The brightness temperature of a body is the temperature of a black body which radiates the same power per unit solid angle per unit area. "toa" means top of atmosphere. It is strongly recommended that a variable with this standard name should have a units_metadata attribute, with one of the values "on-scale" or "difference", whichever is appropriate for the data, because it is essential to know whether the temperature is on-scale (meaning relative to the origin of the scale indicated by the units) or refers to temperature differences (implying that the origin of the temperature scale is irrelevant), in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). K - The brightness temperature of a body is the temperature of a black body which radiates the same power per unit solid angle per unit area.
A phrase assuming_condition indicates that the named quantity is the value which would obtain if all aspects of the system were unaltered except for the assumption of the circumstances specified by the condition. "toa" means top of atmosphere. + The brightness temperature of a body is the temperature of a black body which radiates the same power per unit solid angle per unit area. A phrase assuming_condition indicates that the named quantity is the value which would obtain if all aspects of the system were unaltered except for the assumption of the circumstances specified by the condition. "toa" means top of atmosphere. It is strongly recommended that a variable with this standard name should have a units_metadata attribute, with one of the values "on-scale" or "difference", whichever is appropriate for the data, because it is essential to know whether the temperature is on-scale (meaning relative to the origin of the scale indicated by the units) or refers to temperature differences (implying that the origin of the temperature scale is irrelevant), in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). K - toa_brightness_temperature_bias_at_standard_scene_due_to_intercalibration is the difference between top-of-atmosphere (TOA) brightness temperature -of the reference sensor and TOA brightness temperature of the -monitored sensor. This TOA brightness temperature difference is a measure of the calibration difference between the monitored and reference sensors. The standard scene is a target area with typical Earth surface and atmospheric conditions that is accepted as a reference. Brightness temperature of a body is the temperature of a black body which radiates the same power per unit solid angle per unit area at a given wavenumber. TOA brightness temperature of the standard scene is calculated using a radiative transfer simulation for a given viewing geometry. The resultant top-of-atmosphere spectral radiance is then integrated with each sensor's spectral response function and converted to equivalent brightness temperature. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. + toa_brightness_temperature_bias_at_standard_scene_due_to_intercalibration is the difference between top-of-atmosphere (TOA) brightness temperature of the reference sensor and TOA brightness temperature of the monitored sensor. This TOA brightness temperature difference is a measure of the calibration difference between the monitored and reference sensors. The standard scene is a target area with typical Earth surface and atmospheric conditions that is accepted as a reference. Brightness temperature of a body is the temperature of a black body which radiates the same power per unit solid angle per unit area at a given wavenumber. TOA brightness temperature of the standard scene is calculated using a radiative transfer simulation for a given viewing geometry. The resultant top-of-atmosphere spectral radiance is then integrated with each sensor's spectral response function and converted to equivalent brightness temperature. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase.
It is strongly recommended that a variable with this standard name should have the attribute units_metadata="temperature: difference", meaning that it refers to temperature differences and implying that the origin of the temperature scale is irrelevant, because it is essential to know whether a temperature is on-scale or a difference in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). K - "toa" means top of atmosphere. The brightness temperature of a body is the temperature of a black body which radiates the same power per unit solid angle per unit area at a given wavenumber. The standard scene is a target area with typical Earth surface and atmospheric conditions that is accepted as a reference. The toa radiance of the standard scene is calculated using a radiative transfer model for a given viewing geometry. The resultant toa spectral radiance is then integrated with a sensor's spectral response function and converted to equivalent brightness temperature. + "toa" means top of atmosphere. The brightness temperature of a body is the temperature of a black body which radiates the same power per unit solid angle per unit area at a given wavenumber. The standard scene is a target area with typical Earth surface and atmospheric conditions that is accepted as a reference. The toa radiance of the standard scene is calculated using a radiative transfer model for a given viewing geometry. The resultant toa spectral radiance is then integrated with a sensor's spectral response function and converted to equivalent brightness temperature. It is strongly recommended that a variable with this standard name should have a units_metadata attribute, with one of the values "on-scale" or "difference", whichever is appropriate for the data, because it is essential to know whether the temperature is on-scale (meaning relative to the origin of the scale indicated by the units) or refers to temperature differences (implying that the origin of the temperature scale is irrelevant), in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). W m-2 - "toa" means top of atmosphere. Cloud radiative effect is also commonly known as "cloud radiative forcing". It is the sum of the quantities with standard names toa_shortwave_cloud_radiative_effect and toa_longwave_cloud_radiative_effect. + The abbreviation "toa" means top of atmosphere. Cloud radiative effect is also commonly known as "cloud radiative forcing". It is the sum of the quantities with standard names toa_shortwave_cloud_radiative_effect and toa_longwave_cloud_radiative_effect. A positive radiative forcing or radiative effect is equivalent to a downward radiative flux and contributes to a warming of the earth system. @@ -28756,28 +31764,28 @@ monitored sensor. This TOA brightness temperature difference is a measure of the W m-2 - "longwave" means longwave radiation. Instantaneous forcing is the radiative flux change caused instantaneously by an imposed change in radiative forcing agent (greenhouse gases, aerosol, solar radiation, etc.). + The abbreviation "toa" means top of atmosphere. The term "longwave" means longwave radiation. Instantaneous forcing is the radiative flux change caused instantaneously by an imposed change in radiative forcing agent (greenhouse gases, aerosol, solar radiation, etc.).
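Several of the temperature entries above now recommend the CF units_metadata attribute. A minimal netCDF4-python sketch of attaching it follows; the file and variable names are illustrative assumptions, and "temperature: on-scale" is chosen because brightness temperatures are absolute values rather than differences:

    import netCDF4

    ds = netCDF4.Dataset("tb.nc", "w")      # hypothetical output file
    ds.createDimension("time", None)
    tb = ds.createVariable("tb", "f4", ("time",))
    tb.standard_name = "toa_brightness_temperature"
    tb.units = "K"
    # Values are absolute temperatures, so mark them on-scale; a variable
    # holding temperature differences would use "temperature: difference".
    tb.units_metadata = "temperature: on-scale"
    tb[:] = [287.5]                         # illustrative value
    ds.close()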
A positive radiative forcing or radiative effect is equivalent to a downward radiative flux and contributes to a warming of the earth system. W m-2 - "toa" means top of atmosphere. Instantaneous forcing is the radiative flux change caused instantaneously by an imposed change in radiative forcing agent (greenhouse gases, aerosol, solar radiation, etc.). + The abbreviation "toa" means top of atmosphere. Instantaneous forcing is the radiative flux change caused instantaneously by an imposed change in radiative forcing agent (greenhouse gases, aerosol, solar radiation, etc.). A positive radiative forcing or radiative effect is equivalent to a downward radiative flux and contributes to a warming of the earth system. W m-2 - "shortwave" means shortwave radiation. "toa" means top of atmosphere. Instantaneous forcing is the radiative flux change caused instantaneously by an imposed change in radiative forcing agent (greenhouse gases, aerosol, solar radiation, etc.). + The abbreviation "toa" means top of atmosphere. The term "shortwave" means shortwave radiation. Instantaneous forcing is the radiative flux change caused instantaneously by an imposed change in radiative forcing agent (greenhouse gases, aerosol, solar radiation, etc.). A positive radiative forcing or radiative effect is equivalent to a downward radiative flux and contributes to a warming of the earth system. W m-2 - "toa" means top of atmosphere. "Longwave" means longwave radiation. Cloud radiative effect is also commonly known as "cloud radiative forcing". It is the difference in radiative flux resulting from the presence of clouds, i.e. it is the difference between toa_outgoing_longwave_flux_assuming_clear_sky and toa_outgoing_longwave_flux. + The abbreviation "toa" means top of atmosphere. The term "longwave" means longwave radiation. Cloud radiative effect is also commonly known as "cloud radiative forcing". It is the difference in radiative flux resulting from the presence of clouds. A positive radiative forcing or radiative effect is equivalent to a downward radiative flux and contributes to a warming of the earth system. The quantity with standard name toa_longwave_cloud_radiative_effect is the difference between those with standard names toa_outgoing_longwave_flux_assuming_clear_sky and toa_outgoing_longwave_flux. @@ -28857,6 +31865,20 @@ monitored sensor. This TOA brightness temperature difference is a measure of the A phrase assuming_condition indicates that the named quantity is the value which would obtain if all aspects of the system were unaltered except for the assumption of the circumstances specified by the condition. "longwave" means longwave radiation. "toa" means top of atmosphere. The TOA outgoing longwave flux is the upwelling thermal radiative flux, often called the "outgoing longwave radiation" or "OLR". In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. + + W/m2 + + + A phrase assuming_condition indicates that the named quantity is the value which would obtain if all aspects of the system were unaltered except for the assumption of the circumstances specified by the condition. "longwave" means longwave radiation. "toa" means top of atmosphere. The TOA outgoing longwave flux is the upwelling thermal radiative flux, often called the "outgoing longwave radiation" or "OLR". In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. "Clear sky" means in the absence of clouds. 
This 3D ozone field acts as a reference ozone field in a diagnostic call to the model's radiation scheme. It is expressed in terms of mole fraction of ozone in air. It may be observation-based or model-derived. It may be from any time period. By using the same ozone reference in the diagnostic radiation call in two model simulations and calculating differences between the radiative flux diagnostics from the prognostic call to the radiation scheme and the diagnostic call to the radiation scheme with the ozone reference, an instantaneous radiative forcing for ozone can be calculated. + + + + W/m2 + + + A phrase assuming_condition indicates that the named quantity is the value which would obtain if all aspects of the system were unaltered except for the assumption of the circumstances specified by the condition. "longwave" means longwave radiation. "toa" means top of atmosphere. The TOA outgoing longwave flux is the upwelling thermal radiative flux, often called the "outgoing longwave radiation" or "OLR". In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. This 3D ozone field acts as a reference ozone field in a diagnostic call to the model's radiation scheme. It is expressed in terms of mole fraction of ozone in air. It may be observation-based or model-derived. It may be from any time period. By using the same ozone reference in the diagnostic radiation call in two model simulations and calculating differences between the radiative flux diagnostics from the prognostic call to the radiation scheme and the diagnostic call to the radiation scheme with the ozone reference, an instantaneous radiative forcing for ozone can be calculated. + + W m-2 @@ -28871,6 +31893,13 @@ monitored sensor. This TOA brightness temperature difference is a measure of the "toa" means top of atmosphere. The TOA outgoing radiance is the upwelling radiance, i.e., toward outer space. Radiance is the radiative flux in a particular direction, per unit of solid angle. In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. + + W m-2 sr-1 m-1 + + + The abbreviation "toa" means top of atmosphere. Radiance is the radiative flux in a particular direction, per unit of solid angle. The direction towards which it is going must be specified, for instance with a coordinate of zenith_angle. A coordinate variable for radiation wavelength should be given the standard name radiation_wavelength. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. Some of the solar energy absorbed by pigment systems of plant leaves during photosynthesis is re-emitted as fluorescence. This is called solar-induced chlorophyll fluorescence (SIF). It is a radiance that can be measured on a global scale at various wavelengths and by multiple space borne instruments. SIF is considered a measurement of the photosynthetic machinery in plants and can provide a direct approach for the diagnosis of the actual functional status of vegetation. It is therefore considered a functional proxy of terrestrial gross primary productivity which has the standard name gross_primary_productivity_of_biomass_expressed_as_carbon. SIF spans the wavelength range 600 - 800 nm. + + W m-2 sr-1 (m-1)-1 @@ -28927,6 +31956,13 @@ monitored sensor. 
This TOA brightness temperature difference is a measure of the The abbreviation "toa" means top of atmosphere. The term "shortwave" means shortwave radiation. The TOA outgoing shortwave flux is the reflected and scattered solar radiative flux i.e. the "upwelling" TOA shortwave flux, sometimes called the "outgoing shortwave radiation" or "OSR". In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. A phrase "assuming_condition" indicates that the named quantity is the value which would obtain if all aspects of the system were unaltered except for the assumption of the circumstances specified by the condition. "Clear sky" means in the absence of clouds. + + W/m2 + + + The abbreviation "toa" means top of atmosphere. The term "shortwave" means shortwave radiation. The TOA outgoing shortwave flux is the reflected and scattered solar radiative flux i.e. the "upwelling" TOA shortwave flux, sometimes called the "outgoing shortwave radiation" or "OSR". In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. A phrase "assuming_condition" indicates that the named quantity is the value which would obtain if all aspects of the system were unaltered except for the assumption of the circumstances specified by the condition. "Clear sky" means in the absence of clouds. This 3D ozone field acts as a reference ozone field in a diagnostic call to the model's radiation scheme. It is expressed in terms of mole fraction of ozone in air. It may be observation-based or model-derived. It may be from any time period. By using the same ozone reference in the diagnostic radiation call in two model simulations and calculating differences between the radiative flux diagnostics from the prognostic call to the radiation scheme and the diagnostic call to the radiation scheme with the ozone reference, an instantaneous radiative forcing for ozone can be calculated. + + W m-2 @@ -28934,6 +31970,13 @@ monitored sensor. This TOA brightness temperature difference is a measure of the The abbreviation "toa" means top of atmosphere. The term "shortwave" means shortwave radiation. The TOA outgoing shortwave flux is the reflected and scattered solar radiative flux i.e. the "upwelling" TOA shortwave flux, sometimes called the "outgoing shortwave radiation" or "OSR". In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. A phrase "assuming_condition" indicates that the named quantity is the value which would obtain if all aspects of the system were unaltered except for the assumption of the circumstances specified by the condition. + + W/m2 + + + The abbreviation "toa" means top of atmosphere. The term "shortwave" means shortwave radiation. The TOA outgoing shortwave flux is the reflected and scattered solar radiative flux i.e. the "upwelling" TOA shortwave flux, sometimes called the "outgoing shortwave radiation" or "OSR". In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. A phrase "assuming_condition" indicates that the named quantity is the value which would obtain if all aspects of the system were unaltered except for the assumption of the circumstances specified by the condition. This 3D ozone field acts as a reference ozone field in a diagnostic call to the model's radiation scheme. It is expressed in terms of mole fraction of ozone in air. 
It may be observation-based or model-derived. It may be from any time period. By using the same ozone reference in the diagnostic radiation call in two model simulations and calculating differences between the radiative flux diagnostics from the prognostic call to the radiation scheme and the diagnostic call to the radiation scheme with the ozone reference, an instantaneous radiative forcing for ozone can be calculated. + + W m-2 @@ -28945,7 +31988,21 @@ monitored sensor. This TOA brightness temperature difference is a measure of the W m-2 - "toa" means top of atmosphere. "Shortwave" means shortwave radiation. Cloud radiative effect is also commonly known as "cloud radiative forcing". It is the difference in radiative flux resulting from the presence of clouds, i.e. the difference between toa_net_downward_shortwave_flux and toa_net_downward_shortwave_flux_assuming_clear_sky. + The abbreviation "toa" means top of atmosphere. The term "shortwave" means shortwave radiation. Cloud radiative effect is also commonly known as "cloud radiative forcing". It is the difference in radiative flux resulting from the presence of clouds. A positive radiative forcing or radiative effect is equivalent to a downward radiative flux and contributes to a warming of the earth system. The quantity with standard name toa_shortwave_cloud_radiative_effect is the difference between those with standard names toa_net_downward_shortwave_flux and toa_net_downward_shortwave_flux_assuming_clear_sky. + + degree + + + The quantity with standard name to_direction_of_air_velocity_relative_to_sea_water is the difference between the direction of motion of the air and the near-surface current. The phrase "to_direction" is used in the construction X_to_direction and indicates the direction towards which the velocity vector of X is headed. The direction is a bearing in the usual geographical sense, measured positive clockwise from due north. The components of the relative velocity vector have standard names eastward_air_velocity_relative_to_sea_water and northward_air_velocity_relative_to_sea_water. A vertical coordinate variable or scalar coordinate variable with standard name "depth" should be used to indicate the depth of sea water velocity used in the calculation. Similarly, a vertical coordinate variable or scalar coordinate with standard name "height" should be used to indicate the height of the wind component. + + + + degree + + + The phrase "to_direction" is used in the construction X_to_direction and indicates the direction towards which the vector of X is headed. The direction is a bearing in the usual geographical sense, measured positive clockwise from due north. The surface called "surface" means the lower boundary of the atmosphere. "Downward" indicates a vector component which is positive when directed downward (negative upward). "Surface stress" means the shear stress (force per unit area) exerted by the wind at the surface. A downward stress is a downward flux of momentum. Over large bodies of water, wind stress can drive near-surface currents. @@ -28959,49 +32016,49 @@ monitored sensor. This TOA brightness temperature difference is a measure of the kg m-2 - "Amount" means mass per unit area. + "Amount" means mass per unit area. Transpiration is the process by which liquid water in plant stomata is transferred as water vapor into the atmosphere. kg m-2 s-1 - In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics.
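The "to_direction" convention described above (a bearing measured positive clockwise from due north, towards which the vector is headed) reduces to a two-argument arctangent of the eastward and northward components. A short sketch; the component values are illustrative:

    import math

    def to_direction(eastward, northward):
        # Bearing towards which the vector is headed, in degrees clockwise
        # from due north, per the "to_direction" convention above.
        return math.degrees(math.atan2(eastward, northward)) % 360.0

    print(to_direction(1.0, 0.0))    # 90.0  (headed due east)
    print(to_direction(-1.0, -1.0))  # 225.0 (headed south-west)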
+ In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. Transpiration is the process by which liquid water in plant stomata is transferred as water vapor into the atmosphere. K - "tropical_cyclone_eye_brightness_temperature" means the warmest brightness temperature value in the eye region of a tropical cyclone (0 - 24 km from the storm center) derived using the Advanced Dvorak Technique, based on satellite observations. Reference: Olander, T. L., & Velden, C. S., The Advanced Dvorak Technique: Continued Development of an Objective Scheme to Estimate Tropical Cyclone Intensity Using Geostationary Infrared Satellite Imagery (2007). American Meterorological Society Weather and Forecasting, 22, 287-298. The brightness temperature of a body is the temperature of a black body which radiates the same power per unit solid angle per unit area. + The quantity with standard name tropical_cyclone_eye_brightness_temperature is the warmest brightness temperature value in the eye region of a tropical cyclone (0 - 24 km from the storm center) derived using the Advanced Dvorak Technique, based on satellite observations. Reference: Olander, T. L., & Velden, C. S., The Advanced Dvorak Technique: Continued Development of an Objective Scheme to Estimate Tropical Cyclone Intensity Using Geostationary Infrared Satellite Imagery (2007). American Meteorological Society Weather and Forecasting, 22, 287-298. The brightness temperature of a body is the temperature of a black body which radiates the same power per unit solid angle per unit area. It is strongly recommended that a variable with this standard name should have a units_metadata attribute, with one of the values "on-scale" or "difference", whichever is appropriate for the data, because it is essential to know whether the temperature is on-scale (meaning relative to the origin of the scale indicated by the units) or refers to temperature differences (implying that the origin of the temperature scale is irrelevant), in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). m s-1 - "tropical_cyclone_maximum_sustained_wind_speed" means the maximum sustained wind speed of a tropical cyclone, sustained over a period of one minute at the surface of the earth, derived using the Advanced Dvorak Technique based on satellite observations. Reference: Olander, T. L., & Velden, C. S., The Advanced Dvorak Technique: Continued Development of an Objective Scheme to Estimate Tropical Cyclone Intensity Using Geostationary Infrared Satellite Imagery (2007). American Meterorological Society Weather and Forecasting, 22, 287-298. + The quantity with standard name tropical_cyclone_maximum_sustained_wind_speed is the maximum sustained wind speed of a tropical cyclone, sustained over a period of one minute at the surface of the earth, derived using the Advanced Dvorak Technique based on satellite observations. Reference: Olander, T. L., & Velden, C. S., The Advanced Dvorak Technique: Continued Development of an Objective Scheme to Estimate Tropical Cyclone Intensity Using Geostationary Infrared Satellite Imagery (2007). American Meteorological Society Weather and Forecasting, 22, 287-298. W m-2 - "longwave" means longwave radiation. Adjusted forcing is the radiative flux change caused by an imposed change in radiative forcing agent (greenhouse gases, aerosol, solar radiation, etc.) after allowance for stratospheric temperature adjustment.
+ The term "longwave" means longwave radiation. Adjusted forcing is the radiative flux change caused by an imposed change in radiative forcing agent (greenhouse gases, aerosol, solar radiation, etc.) after allowance for stratospheric temperature adjustment. A positive radiative forcing or radiative effect is equivalent to a downward radiative flux and contributes to a warming of the earth system. W m-2 - Adjusted forcing is the radiative flux change caused by an imposed change in radiative forcing agent (greenhouse gases, aerosol, solar radiation, etc.) after allowance for stratospheric temperature adjustment. + Adjusted forcing is the radiative flux change caused by an imposed change in radiative forcing agent (greenhouse gases, aerosol, solar radiation, etc.) after allowance for stratospheric temperature adjustment. A positive radiative forcing or radiative effect is equivalent to a downward radiative flux and contributes to a warming of the earth system. W m-2 - "shortwave" means shortwave radiation. Adjusted forcing is the radiative flux change caused by an imposed change in radiative forcing agent (greenhouse gases, aerosol, solar radiation, etc.) after allowance for stratospheric temperature adjustment. + The term "shortwave" means shortwave radiation. Adjusted forcing is the radiative flux change caused by an imposed change in radiative forcing agent (greenhouse gases, aerosol, solar radiation, etc.) after allowance for stratospheric temperature adjustment. A positive radiative forcing or radiative effect is equivalent to a downward radiative flux and contributes to a warming of the earth system. @@ -29015,7 +32072,7 @@ monitored sensor. This TOA brightness temperature difference is a measure of the K - Air temperature is the bulk temperature of the air, not the surface (skin) temperature. + Air temperature is the bulk temperature of the air, not the surface (skin) temperature. It is strongly recommended that a variable with this standard name should have a units_metadata attribute, with one of the values "on-scale" or "difference", whichever is appropriate for the data, because it is essential to know whether the temperature is on-scale (meaning relative to the origin of the scale indicated by the units) or refers to temperature differences (implying that the origin of the temperature scale is irrelevant), in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). @@ -29036,21 +32093,21 @@ monitored sensor. This TOA brightness temperature difference is a measure of the W m-2 - "longwave" means longwave radiation. Instantaneous forcing is the radiative flux change caused instantaneously by an imposed change in radiative forcing agent (greenhouse gases, aerosol, solar radiation, etc.). + The term "longwave" means longwave radiation. Instantaneous forcing is the radiative flux change caused instantaneously by an imposed change in radiative forcing agent (greenhouse gases, aerosol, solar radiation, etc.). A positive radiative forcing or radiative effect is equivalent to a downward radiative flux and contributes to a warming of the earth system. W m-2 - Instantaneous forcing is the radiative flux change caused instantaneously by an imposed change in radiative forcing agent (greenhouse gases, aerosol, solar radiation, etc.). + Instantaneous forcing is the radiative flux change caused instantaneously by an imposed change in radiative forcing agent (greenhouse gases, aerosol, solar radiation, etc.).
A positive radiative forcing or radiative effect is equivalent to a downward radiative flux and contributes to a warming of the earth system. W m-2 - "shortwave" means shortwave radiation. Instantaneous forcing is the radiative flux change caused instantaneously by an imposed change in radiative forcing agent (greenhouse gases, aerosol, solar radiation, etc.). + The term "shortwave" means shortwave radiation. Instantaneous forcing is the radiative flux change caused instantaneously by an imposed change in radiative forcing agent (greenhouse gases, aerosol, solar radiation, etc.). A positive radiative forcing or radiative effect is equivalent to a downward radiative flux and contributes to a warming of the earth system. @@ -29120,14 +32177,14 @@ monitored sensor. This TOA brightness temperature difference is a measure of the mol m-2 - "Content" indicates a quantity per unit area. The "troposphere content" of a quantity refers to the vertical integral from the surface to the tropopause. For the content between specified levels in the atmosphere, standard names including content_of_atmosphere_layer are used. The chemical formula for suflur_dioxide is SO2. + "Content" indicates a quantity per unit area. The "troposphere content" of a quantity refers to the vertical integral from the surface to the tropopause. For the content between specified levels in the atmosphere, standard names including "content_of_atmosphere_layer" are used. The chemical formula for sulfur dioxide is SO2. m - "Turbulent mixing length" is used in models to describe the average distance over which a fluid parcel can travel while retaining properties that allow the parcel to be distinguished from its immediate environonment. "Turbulent mixing" means chaotic fluctuations of the fluid flow. + "Turbulent mixing length" is used in models to describe the average distance over which a fluid parcel can travel while retaining properties that allow the parcel to be distinguished from its immediate environment. "Turbulent mixing" means chaotic fluctuations of the fluid flow. @@ -29151,6 +32208,13 @@ monitored sensor. This TOA brightness temperature difference is a measure of the The "Ultraviolet Index" (UVI) is a measure of the amount of solar ultraviolet radiation that reaches the surface of the earth depending on factors such as time of day and cloud cover. It is often used to alert the public of the need to limit sun exposure and use sun creams to protect the skin. Each point on the Index scale is equivalent to 25 mW m-2 of UV radiation (reference: Australian Bureau of Meteorology, http://www.bom.gov.au/uv/about_uv_index.shtml). The UVI range is expressed as a numeric value from 0 to 20 and sometimes graphically as bands of color indicating the attendant risk of skin damage. A UVI of 0-2 is described as 'Low' (represented graphically in green); a UVI of 11 or greater is described as "Extreme" (represented graphically in purple). The higher the UVI, the greater the potential health risk to humans and the less time it takes for harm to occur. A phrase "assuming_condition" indicates that the named quantity is the value which would obtain if all aspects of the system were unaltered except for the assumption of the circumstances specified by the condition. "Overcast" means a fractional sky cover of 95% or more when at least a portion of this amount is attributable to clouds or obscuring phenomena (such as haze, dust, smoke, fog, etc.) aloft. (Reference: AMS Glossary: http://glossary.ametsoc.org/wiki/Main_Page). 
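Given the 25 mW m-2 per index point quoted in the Ultraviolet Index description above, converting a biologically weighted UV flux to a UVI value is a single division. A small sketch; the flux value is illustrative:

    def ultraviolet_index(uv_flux):
        # Each UVI point is equivalent to 25 mW m-2 (0.025 W m-2) of UV
        # radiation, per the Bureau of Meteorology reference above.
        return uv_flux / 0.025

    print(ultraviolet_index(0.15))  # 6.0, for an illustrative flux in W m-2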
Standard names are also defined for the quantities ultraviolet_index and ultraviolet_index_assuming_clear_sky. + + degree_C + + + Universal Thermal Comfort Index (UTCI) is an equivalent temperature of the actual thermal condition. Reference: utci.org. It is the air temperature of a reference condition causing the same dynamic physiological response in a human body considering its energy budget, physiology and clothing adaptation. It is strongly recommended that a variable with this standard name should have a units_metadata attribute, with one of the values "on-scale" or "difference", whichever is appropriate for the data, because it is essential to know whether the temperature is on-scale (meaning relative to the origin of the scale indicated by the units) or refers to temperature differences (implying that the origin of the temperature scale is irrelevant), in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). + + m s-1 40 A velocity is a vector quantity. "Upward" indicates a vector component which is positive when directed upward (negative downward). Upward air velocity is the vertical component of the 3D air velocity vector. The standard name downward_air_velocity may be used for a vector component with the opposite sign convention. @@ -29158,6 +32222,27 @@ monitored sensor. This TOA brightness temperature difference is a measure of the + + s-1 + + + The quantity with standard name upward_derivative_of_eastward_wind is the derivative of the eastward component of wind with respect to height. The phrase "component_derivative_of_X" means derivative of X with respect to distance in the component direction, which may be "northward", "southward", "eastward", "westward", "upward", "downward", "x" or "y". The last two indicate derivatives along the axes of the grid, in the case where they are not true longitude and latitude. A positive value indicates that X is increasing with distance along the positive direction of the axis. Wind is defined as a two-dimensional (horizontal) air velocity vector, with no vertical component. (Vertical motion in the atmosphere has the standard name "upward_air_velocity"). + + + + s-1 + + + The quantity with standard name upward_derivative_of_northward_wind is the derivative of the northward component of wind speed with respect to height. The phrase "component_derivative_of_X" means derivative of X with respect to distance in the component direction, which may be "northward", "southward", "eastward", "westward", "upward", "downward", "x" or "y". The last two indicate derivatives along the axes of the grid, in the case where they are not true longitude and latitude. A positive value indicates that X is increasing with distance along the positive direction of the axis. Wind is defined as a two-dimensional (horizontal) air velocity vector, with no vertical component. (Vertical motion in the atmosphere has the standard name "upward_air_velocity"). + + + + degree m-1 + + + The quantity with standard name upward_derivative_of_wind_from_direction is the derivative of wind from_direction with respect to height. The phrase "component_derivative_of_X" means derivative of X with respect to distance in the component direction, which may be "northward", "southward", "eastward", "westward", "upward", "downward", "x" or "y". The last two indicate derivatives along the axes of the grid, in the case where they are not true longitude and latitude.
A positive value indicates that X is increasing with distance along the positive direction of the axis. The phrase "from_direction" is used in the construction X_from_direction and indicates the direction from which the velocity vector of X is coming. The direction is a bearing in the usual geographical sense, measured positive clockwise from due north. In meteorological reports, the direction of the wind vector is usually (but not always) given as the direction from which it is blowing ("wind_from_direction") (westerly, northerly, etc.). In other contexts, such as atmospheric modelling, it is often natural to give the direction in the usual manner of vectors as the heading or the direction to which it is blowing ("wind_to_direction") (eastward, southward, etc.). Wind is defined as a two-dimensional (horizontal) air velocity vector, with no vertical component. (Vertical motion in the atmosphere has the standard name "upward_air_velocity"). + + W m-2 @@ -29249,6 +32334,13 @@ monitored sensor. This TOA brightness temperature difference is a measure of the "Upward" indicates a vector component which is positive when directed upward (negative downward). The latent heat flux is the exchange of heat across a surface on account of evaporation and condensation (including sublimation and deposition). In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. + + W m-2 + + + "Upward" indicates a vector component which is positive when directed upward (negative downward). In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. Transpiration is the process by which liquid water in plant stomata is transferred as water vapor into the atmosphere. The latent heat flux due to transpiration is the release of latent heat from plant surfaces to the air due to the release of water vapor. + + kg m-2 s-1 @@ -29354,6 +32446,20 @@ monitored sensor. This TOA brightness temperature difference is a measure of the The term "longwave" means longwave radiation. Upwelling radiation is radiation from below. It does not mean "net upward". The sign convention is that "upwelling" is positive upwards and "downwelling" is positive downwards. When thought of as being incident on a surface, a radiative flux is sometimes called "irradiance". In addition, it is identical with the quantity measured by a cosine-collector light-meter and sometimes called "vector irradiance". In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. A phrase assuming_condition indicates that the named quantity is the value which would obtain if all aspects of the system were unaltered except for the assumption of the circumstances specified by the condition. "Clear sky" means in the absence of clouds. + + W/m2 + + + The term "longwave" means longwave radiation. Upwelling radiation is radiation from below. It does not mean "net upward". The sign convention is that "upwelling" is positive upwards and "downwelling" is positive downwards. When thought of as being incident on a surface, a radiative flux is sometimes called "irradiance". 
In addition, it is identical with the quantity measured by a cosine-collector light-meter and sometimes called "vector irradiance". In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. A phrase assuming_condition indicates that the named quantity is the value which would obtain if all aspects of the system were unaltered except for the assumption of the circumstances specified by the condition. "Clear sky" means in the absence of clouds. This 3D ozone field acts as a reference ozone field in a diagnostic call to the model's radiation scheme. It is expressed in terms of mole fraction of ozone in air. It may be observation-based or model-derived. It may be from any time period. By using the same ozone reference in the diagnostic radiation call in two model simulations and calculating differences between the radiative flux diagnostics from the prognostic call to the radiation scheme and the diagnostic call to the radiation scheme with the ozone reference, an instantaneous radiative forcing for ozone can be calculated. + + + + W/m2 + + + The term "longwave" means longwave radiation. Upwelling radiation is radiation from below. It does not mean "net upward". The sign convention is that "upwelling" is positive upwards and "downwelling" is positive downwards. When thought of as being incident on a surface, a radiative flux is sometimes called "irradiance". In addition, it is identical with the quantity measured by a cosine-collector light-meter and sometimes called "vector irradiance". In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. A phrase assuming_condition indicates that the named quantity is the value which would obtain if all aspects of the system were unaltered except for the assumption of the circumstances specified by the condition. This 3D ozone field acts as a reference ozone field in a diagnostic call to the model's radiation scheme. It is expressed in terms of mole fraction of ozone in air. It may be observation-based or model-derived. It may be from any time period. By using the same ozone reference in the diagnostic radiation call in two model simulations and calculating differences between the radiative flux diagnostics from the prognostic call to the radiation scheme and the diagnostic call to the radiation scheme with the ozone reference, an instantaneous radiative forcing for ozone can be calculated. + + W m-2 sr-1 @@ -29403,6 +32509,20 @@ monitored sensor. This TOA brightness temperature difference is a measure of the Upwelling radiation is radiation from below. It does not mean "net upward". The sign convention is that "upwelling" is positive upwards and "downwelling" is positive downwards. The term "shortwave" means shortwave radiation. When thought of as being incident on a surface, a radiative flux is sometimes called "irradiance". In addition, it is identical with the quantity measured by a cosine-collector light-meter and sometimes called "vector irradiance". In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. A phrase "assuming_condition" indicates that the named quantity is the value which would obtain if all aspects of the system were unaltered except for the assumption of the circumstances specified by the condition. "Clear sky" means in the absence of clouds. + + W/m2 + + + Upwelling radiation is radiation from below. It does not mean "net upward". 
The sign convention is that "upwelling" is positive upwards and "downwelling" is positive downwards. The term "shortwave" means shortwave radiation. When thought of as being incident on a surface, a radiative flux is sometimes called "irradiance". In addition, it is identical with the quantity measured by a cosine-collector light-meter and sometimes called "vector irradiance". In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. A phrase "assuming_condition" indicates that the named quantity is the value which would obtain if all aspects of the system were unaltered except for the assumption of the circumstances specified by the condition. "Clear sky" means in the absence of clouds. This 3D ozone field acts as a reference ozone field in a diagnostic call to the model's radiation scheme. It is expressed in terms of mole fraction of ozone in air. It may be observation-based or model-derived. It may be from any time period. By using the same ozone reference in the diagnostic radiation call in two model simulations and calculating differences between the radiative flux diagnostics from the prognostic call to the radiation scheme and the diagnostic call to the radiation scheme with the ozone reference, an instantaneous radiative forcing for ozone can be calculated. + + + + W/m2 + + + Upwelling radiation is radiation from below. It does not mean "net upward". The sign convention is that "upwelling" is positive upwards and "downwelling" is positive downwards. The term "shortwave" means shortwave radiation. When thought of as being incident on a surface, a radiative flux is sometimes called "irradiance". In addition, it is identical with the quantity measured by a cosine-collector light-meter and sometimes called "vector irradiance". In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. A phrase "assuming_condition" indicates that the named quantity is the value which would obtain if all aspects of the system were unaltered except for the assumption of the circumstances specified by the condition. This 3D ozone field acts as a reference ozone field in a diagnostic call to the model's radiation scheme. It is expressed in terms of mole fraction of ozone in air. It may be observation-based or model-derived. It may be from any time period. By using the same ozone reference in the diagnostic radiation call in two model simulations and calculating differences between the radiative flux diagnostics from the prognostic call to the radiation scheme and the diagnostic call to the radiation scheme with the ozone reference, an instantaneous radiative forcing for ozone can be calculated. + + W m-2 sr-1 @@ -29414,7 +32534,7 @@ monitored sensor. This TOA brightness temperature difference is a measure of the 1 87 - "X_area_fraction" means the fraction of horizontal area occupied by X. "X_area" means the horizontal area occupied by X within the grid cell. "Vegetation" means any plants e.g. trees, shrubs, grass. + "Area fraction" is the fraction of a grid cell's horizontal area that has some characteristic of interest. It is evaluated as the area of interest divided by the grid cell area. It may be expressed as a fraction, a percentage, or any other dimensionless representation of a fraction. "Vegetation" means any plants e.g. trees, shrubs, grass. The term "plants" refers to the kingdom of plants in the modern classification which excludes fungi. Plants are autotrophs i.e. 
"producers" of biomass using carbon obtained from carbon dioxide. @@ -29452,8 +32572,15 @@ monitored sensor. This TOA brightness temperature difference is a measure of the The vertical_component_of_ocean_xy_tracer_diffusivity means the vertical component of the diffusivity of tracers in the ocean due to lateral mixing. This quantity could appear in formulations of lateral diffusivity in which "lateral" does not mean "iso-level", e.g. it would not be used for isopycnal diffusivity. "Tracer diffusivity" means the diffusivity of heat and salinity due to motion which is not resolved on the grid scale of the model. - - kg m-2 s-1 + + m + + + "Vertical navigation clearance" is the vertical distance between the surface of a navigable waterway and a hazard above it such as a bridge. It is a time-varying quantity because the clearance distance is due to all processes that change the position of either the surface or the hazard. "Waterway surface" means the upper boundary of any body of navigable water. + + + + kg m-2 s-1 The virtual_salt_flux_into_sea_water_due_to_process is the salt flux that would have the same effect on the sea surface salinity as water_flux_out_of_sea_water_due_to_process. Flux correction is also called "flux adjustment". A positive flux correction is downward i.e. added to the ocean. In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. @@ -29505,7 +32632,7 @@ monitored sensor. This TOA brightness temperature difference is a measure of the K 12 - The virtual temperature of air is the temperature at which the dry air constituent of a parcel of moist air would have the same density as the moist air at the same pressure. + The virtual temperature of air is the temperature at which the dry air constituent of a parcel of moist air would have the same density as the moist air at the same pressure. It is strongly recommended that a variable with this standard name should have a units_metadata attribute, with one of the values "on-scale" or "difference", whichever is appropriate for the data, because it is essential to know whether the temperature is on-scale (meaning relative to the origin of the scale indicated by the units) or refers to temperature differences (implying that the origin of the temperature scale is irrevelant), in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). @@ -29515,2550 +32642,3534 @@ monitored sensor. This TOA brightness temperature difference is a measure of the The visibility is the distance at which something can be seen. - + m-1 - The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. Coefficients with canonical units of m2 s-1 i.e. multiplied by density have standard names with "specific_" instead of "volume_". The scattering/absorption/attenuation coefficient is assumed to be an integral over all wavelengths unless a coordinate of "radiation_wavelength" or "radiation_frequency" is included to specify the wavelength. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. "Dried_aerosol" means that the aerosol sample has been dried from the ambient state, but that the dry state (relative humidity less than 40 per cent) has not necessarily been reached. 
To specify the relative humidity at which the sample was measured, provide a scalar coordinate variable with the standard name of "relative_humidity". The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. + The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. Coefficients with canonical units of m2 s-1 i.e. multiplied by density have standard names with specific_ instead of volume_. A scattering_angle should not be specified with this quantity. The scattering/absorption/attenuation coefficient is assumed to be an integral over all wavelengths, unless a coordinate of radiation_wavelength is included to specify the wavelength. Radiative flux is the sum of shortwave and longwave radiative fluxes. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. To specify the relative humidity at which the sample was measured, provide a scalar coordinate variable with the standard name of "relative_humidity". The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. - + m-1 - Radiative flux is the sum of shortwave and longwave radiative fluxes. In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. Coefficients with canonical units of m2 s-1 i.e. multiplied by density have standard names with specific_ instead of volume_. The scattering/absorption/attenuation coefficient is assumed to be an integral over all wavelengths, unless a coordinate of radiation_wavelength is included to specify the wavelength. + The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. Coefficients with canonical units of m2 s-1 i.e. multiplied by density have standard names with specific_ instead of volume_. A scattering_angle should not be specified with this quantity. The scattering/absorption/attenuation coefficient is assumed to be an integral over all wavelengths, unless a coordinate of radiation_wavelength is included to specify the wavelength. Radiative flux is the sum of shortwave and longwave radiative fluxes. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. To specify the relative humidity at which the sample was measured, provide a scalar coordinate variable with the standard name of "relative_humidity". The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Standard_temperature_and_pressure" refer to a reference volume at 273.15 K temperature and 1013.25 hPa pressure. - + m-1 - The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. 
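The coefficient defined above (the fractional change of radiative flux per unit path length) satisfies dF/dx = -beta*F, so along a homogeneous path the flux decays as F(x) = F(0)*exp(-beta*x). A small numerical sketch with illustrative values:

    import math

    def attenuated_flux(f0, beta, path):
        # beta is a volume attenuation coefficient in m-1, as defined above;
        # f0 is the incident flux and path the traversed distance in metres.
        return f0 * math.exp(-beta * path)

    # An illustrative 1e-4 m-1 coefficient over a 1 km path removes ~9.5%.
    print(attenuated_flux(1.0, 1e-4, 1000.0))  # ~0.905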
Radiative flux is the sum of shortwave and longwave radiative fluxes. In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. Coefficients with canonical units of m2 s-1 i.e. multiplied by density have standard names with specific_ instead of volume_. The scattering/absorption/attenuation coefficient is assumed to be an integral over all wavelengths, unless a coordinate of radiation_wavelength is included to specify the wavelength.
+ The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. Coefficients with canonical units of m2 s-1 i.e. multiplied by density have standard names with specific_ instead of volume_. A scattering_angle should not be specified with this quantity. The scattering/absorption/attenuation coefficient is assumed to be an integral over all wavelengths, unless a coordinate of radiation_wavelength is included to specify the wavelength. Radiative flux is the sum of shortwave and longwave radiative fluxes. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. "Ambient aerosol particles" are aerosol particles that have taken up ambient water through hygroscopic growth. The extent of hygroscopic growth depends on the relative humidity and the composition of the particles. To specify the relative humidity at which the sample was measured, provide a scalar coordinate variable with the standard name of "relative_humidity". The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase.

- m-1 sr-1
+ m-1
- Attenuation is the sum of absorption and scattering. Attenuation is sometimes called "extinction". The attenuated backwards scattering function includes the effects of two-way attenuation by the medium between a radar source and receiver. The volume scattering function is the fraction of incident radiative flux scattered into unit solid angle per unit path length. Backwards scattering refers to the sum of scattering into all backward angles i.e. scattering_angle exceeding pi/2 radians. A scattering_angle should not be specified with this quantity.
+ The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. Coefficients with canonical units of m2 s-1 i.e. multiplied by density have standard names with "specific_" instead of "volume_". A scattering_angle should not be specified with this quantity. The scattering/absorption/attenuation coefficient is assumed to be an integral over all wavelengths, unless a coordinate of radiation_wavelength is included to specify the wavelength. Radiative flux is the sum of shortwave and longwave radiative fluxes. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. "Ambient_aerosol" means that the aerosol is measured or modelled at the ambient state of pressure, temperature and relative humidity that exists in its immediate environment. "Ambient aerosol particles" are aerosol particles that have taken up ambient water through hygroscopic growth. The extent of hygroscopic growth depends on the relative humidity and the composition of the particles. To specify the relative humidity at which the sample was measured, provide a scalar coordinate variable with the standard name of "relative_humidity". The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Standard_temperature_and_pressure" refer to a reference volume at 273.15 K temperature and 1013.25 hPa pressure.

- m-1 sr-1
+ m-1
- Attenuation is the sum of absorption and scattering. Attenuation is sometimes called "extinction". The attenuated backwards scattering function includes the effects of two-way attenuation by the medium between a radar source and receiver. The volume scattering function is the fraction of incident radiative flux scattered into unit solid angle per unit path length. Backwards scattering refers to the sum of scattering into all backward angles i.e. scattering_angle exceeding pi/2 radians. A scattering_angle should not be specified with this quantity. A phrase "assuming_condition" indicates that the named quantity is the value which would obtain if all aspects of the system were unaltered except for the assumption of the circumstances specified by the condition. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself.
+ The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. Coefficients with canonical units of m2 s-1 i.e. multiplied by density have standard names with "specific_" instead of "volume_". The scattering/absorption/attenuation coefficient is assumed to be an integral over all wavelengths unless a coordinate of "radiation_wavelength" or "radiation_frequency" is included to specify the wavelength. Radiative flux is the sum of shortwave and longwave radiative fluxes. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. "Dried_aerosol" means that the aerosol sample has been dried from the ambient state, but that the dry state (relative humidity less than 40 per cent) has not necessarily been reached. To specify the relative humidity at which the sample was measured, provide a scalar coordinate variable with the standard name of "relative_humidity". The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase.

+ m-1
- Downwelling radiation is radiation from above. It does not mean "net downward". The sign convention is that "upwelling" is positive upwards and "downwelling" is positive downwards. Radiative flux is the sum of shortwave and longwave radiative fluxes. When thought of as being incident on a surface, a radiative flux is sometimes called "irradiance". In addition, it is identical with the quantity measured by a cosine-collector light-meter and sometimes called "vector irradiance". In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. Coefficients with canonical units of m2 s-1 i.e. multiplied by density have standard names with specific_ instead of volume_. The scattering/absorption/attenuation coefficient is assumed to be an integral over all wavelengths, unless a coordinate of radiation_wavelength is included to specify the wavelength. Attenuation is the sum of absorption and scattering. Attenuation is sometimes called "extinction". Also called "diffuse" attenuation, the attenuation of downwelling radiative flux refers to the decrease with decreasing height or increasing depth of the downwelling component of radiative flux, regardless of incident direction.
+ The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. Coefficients with canonical units of m2 s-1 i.e. multiplied by density have standard names with specific_ instead of volume_. A scattering_angle should not be specified with this quantity. The scattering/absorption/attenuation coefficient is assumed to be an integral over all wavelengths, unless a coordinate of radiation_wavelength is included to specify the wavelength. Radiative flux is the sum of shortwave and longwave radiative fluxes. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. "Dried_aerosol_particles" means that the aerosol sample has been dried from the ambient state before sizing, but that the dry state (relative humidity less than 40 per cent) has not necessarily been reached. To specify the relative humidity at which the sample was measured, provide a scalar coordinate variable with the standard name of "relative_humidity". The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Standard_temperature_and_pressure" refer to a reference volume at 273.15 K temperature and 1013.25 hPa pressure.

+ m-1
- The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. Coefficients with canonical units of m2 s-1 i.e. multiplied by density have standard names with specific_ instead of volume_. Backwards scattering refers to the sum of scattering into all backward angles i.e. scattering_angle exceeds pi/2 radians. A scattering_angle should not be specified with this quantity. The scattering/absorption/attenuation coefficient is assumed to be an integral over all wavelengths, unless a coordinate of radiation_wavelength is included to specify the wavelength. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. "Dried_aerosol" means that the aerosol sample has been dried from the ambient state, but that the dry state (relative humidity less than 40 per cent) has not necessarily been reached. To specify the relative humidity at which the sample was measured, provide a scalar coordinate variable with the standard name of "relative_humidity". The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase.
+ The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. Coefficients with canonical units of m2 s-1 i.e. multiplied by density have standard names with specific_ instead of volume_. A scattering_angle should not be specified with this quantity. The scattering/absorption/attenuation coefficient is assumed to be an integral over all wavelengths, unless a coordinate of radiation_wavelength is included to specify the wavelength. Radiative flux is the sum of shortwave and longwave radiative fluxes. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. Aerosol particles take up ambient water (a process known as hygroscopic growth) depending on the relative humidity and the composition of the particles. "Dry aerosol particles" means aerosol particles without any water uptake. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase.

+ m-1
- Radiative flux is the sum of shortwave and longwave radiative fluxes. In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. Coefficients with canonical units of m2 s-1 i.e. multiplied by density have standard names with specific_ instead of volume_. The scattering/absorption/attenuation coefficient is assumed to be an integral over all wavelengths, unless a coordinate of radiation_wavelength is included to specify the wavelength. Scattering of radiation is its deflection from its incident path without loss of energy. Backwards scattering refers to the sum of scattering into all backward angles i.e. scattering_angle exceeding pi/2 radians. A scattering_angle should not be specified with this quantity.
+ The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. Coefficients with canonical units of m2 s-1 i.e. multiplied by density have standard names with specific_ instead of volume_. A scattering_angle should not be specified with this quantity. The scattering/absorption/attenuation coefficient is assumed to be an integral over all wavelengths, unless a coordinate of radiation_wavelength is included to specify the wavelength. Radiative flux is the sum of shortwave and longwave radiative fluxes. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. Aerosol particles take up ambient water (a process known as hygroscopic growth) depending on the relative humidity and the composition of the particles. "Dry aerosol particles" means aerosol particles without any water uptake. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Standard_temperature_and_pressure" refer to a reference volume at 273.15 K temperature and 1013.25 hPa pressure.

+ m-1
- Radiative flux is the sum of shortwave and longwave radiative fluxes. In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. Coefficients with canonical units of m2 s-1 i.e. multiplied by density have standard names with specific_ instead of volume_. The scattering/absorption/attenuation coefficient is assumed to be an integral over all wavelengths, unless a coordinate of radiation_wavelength is included to specify the wavelength. Attenuation is the sum of absorption and scattering. Attenuation is sometimes called "extinction". Beam attenuation refers to the decrease of radiative flux along the direction of the incident path. It is distinguished from attenuation of the downwelling component of radiative flux from any incident direction, also called "diffuse" attenuation.
+ The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. Coefficients with canonical units of m2 s-1 i.e. multiplied by density have standard names with specific_ instead of volume_. A scattering_angle should not be specified with this quantity. The scattering/absorption/attenuation coefficient is assumed to be an integral over all wavelengths, unless a coordinate of radiation_wavelength is included to specify the wavelength. Radiative flux is the sum of shortwave and longwave radiative fluxes. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. To specify the relative humidity at which the sample was measured, provide a scalar coordinate variable with the standard name of "relative_humidity". The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Pm10 aerosol particles" means atmospheric particulate compounds with an aerodynamic diameter of less than or equal to 10 micrometers.

+ m-1
- Radiative flux is the sum of shortwave and longwave radiative fluxes. In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. The scattering/absorption/attenuation coefficient is assumed to be an integral over all wavelengths, unless a coordinate of radiation_wavelength is included to specify the wavelength. Attenuation is the sum of absorption and scattering. Attenuation is sometimes called "extinction". Beam attenuation refers to the decrease of radiative flux along the direction of the incident path. It is distinguished from attenuation of the downwelling component of radiative flux from any incident direction, also called "diffuse" attenuation. The phrase "corrected for pure water attenuance" means the attenuation coefficient has been adjusted/calibrated to remove the influence of absorption/scattering by the water itself. Coefficients with canonical units of m2 s-1 i.e. multiplied by density have standard names with specific_ instead of volume_.
+ The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. Coefficients with canonical units of m2 s-1 i.e. multiplied by density have standard names with specific_ instead of volume_. A scattering_angle should not be specified with this quantity. The scattering/absorption/attenuation coefficient is assumed to be an integral over all wavelengths, unless a coordinate of radiation_wavelength is included to specify the wavelength. Radiative flux is the sum of shortwave and longwave radiative fluxes. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. To specify the relative humidity at which the sample was measured, provide a scalar coordinate variable with the standard name of "relative_humidity". The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Pm10 aerosol particles" means atmospheric particulate compounds with an aerodynamic diameter of less than or equal to 10 micrometers. "Standard_temperature_and_pressure" refer to a reference volume at 273.15 K temperature and 1013.25 hPa pressure.

- m -1
+ m-1
- The volume extinction coefficient is the fractional change of radiative flux per unit path length. Extinction is the sum of absorption and scattering, sometimes called "attenuation". "Extinction" is the term most commonly used at optical wavelengths whereas "attenuation" is more often used at radio and radar wavelengths. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. "Ambient_aerosol" means that the aerosol is measured or modelled at the ambient state of pressure, temperature and relative humidity that exists in its immediate environment. "Ambient aerosol particles" are aerosol particles that have taken up ambient water through hygroscopic growth. The extent of hygroscopic growth depends on the relative humidity and the composition of the particles. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase.
+ The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. Coefficients with canonical units of m2 s-1 i.e. multiplied by density have standard names with specific_ instead of volume_. A scattering_angle should not be specified with this quantity. The scattering/absorption/attenuation coefficient is assumed to be an integral over all wavelengths, unless a coordinate of radiation_wavelength is included to specify the wavelength. Radiative flux is the sum of shortwave and longwave radiative fluxes. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. "Ambient aerosol particles" are aerosol particles that have taken up ambient water through hygroscopic growth. The extent of hygroscopic growth depends on the relative humidity and the composition of the particles. To specify the relative humidity at which the sample was measured, provide a scalar coordinate variable with the standard name of "relative_humidity". The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Pm10 aerosol particles" means atmospheric particulate compounds with an aerodynamic diameter of less than or equal to 10 micrometers.

+ m-1
- The volume extinction coefficient is the fractional change of radiative flux per unit path length. Extinction is the sum of absorption and scattering, sometimes called "attenuation". "Extinction" is the term most commonly used at optical wavelengths whereas "attenuation" is more often used at radio and radar wavelengths. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Cloud particles" means suspended liquid or ice water droplets. A coordinate of radiation_wavelength or radiation_frequency should be included to specify either the wavelength or frequency.
+ The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. Coefficients with canonical units of m2 s-1 i.e. multiplied by density have standard names with specific_ instead of volume_. A scattering_angle should not be specified with this quantity. The scattering/absorption/attenuation coefficient is assumed to be an integral over all wavelengths, unless a coordinate of radiation_wavelength is included to specify the wavelength. Radiative flux is the sum of shortwave and longwave radiative fluxes. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. "Ambient aerosol particles" are aerosol particles that have taken up ambient water through hygroscopic growth. The extent of hygroscopic growth depends on the relative humidity and the composition of the particles. To specify the relative humidity at which the sample was measured, provide a scalar coordinate variable with the standard name of "relative_humidity". The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Pm10 aerosol particles" means atmospheric particulate compounds with an aerodynamic diameter of less than or equal to 10 micrometers. "Standard_temperature_and_pressure" refer to a reference volume at 273.15 K temperature and 1013.25 hPa pressure.

- 1
+ m-1
- "Volume fraction" is used in the construction volume_fraction_of_X_in_Y, where X is a material constituent of Y.
+ The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. Coefficients with canonical units of m2 s-1 i.e. multiplied by density have standard names with specific_ instead of volume_. A scattering_angle should not be specified with this quantity. The scattering/absorption/attenuation coefficient is assumed to be an integral over all wavelengths, unless a coordinate of radiation_wavelength is included to specify the wavelength. Radiative flux is the sum of shortwave and longwave radiative fluxes. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. "Dried_aerosol_particles" means that the aerosol sample has been dried from the ambient state before sizing, but that the dry state (relative humidity less than 40 per cent) has not necessarily been reached. To specify the relative humidity at which the sample was measured, provide a scalar coordinate variable with the standard name of "relative_humidity". The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Pm10 aerosol particles" means atmospheric particulate compounds with an aerodynamic diameter of less than or equal to 10 micrometers.

- 1
+ m-1
- "Condensed water" means liquid and ice. "Volume fraction" is used in the construction volume_fraction_of_X_in_Y, where X is a material constituent of Y.
+ The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. Coefficients with canonical units of m2 s-1 i.e. multiplied by density have standard names with specific_ instead of volume_. A scattering_angle should not be specified with this quantity. The scattering/absorption/attenuation coefficient is assumed to be an integral over all wavelengths, unless a coordinate of radiation_wavelength is included to specify the wavelength. Radiative flux is the sum of shortwave and longwave radiative fluxes. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. "Dried_aerosol_particles" means that the aerosol sample has been dried from the ambient state before sizing, but that the dry state (relative humidity less than 40 per cent) has not necessarily been reached. To specify the relative humidity at which the sample was measured, provide a scalar coordinate variable with the standard name of "relative_humidity". The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Pm10 aerosol particles" means atmospheric particulate compounds with an aerodynamic diameter of less than or equal to 10 micrometers. "Standard_temperature_and_pressure" refer to a reference volume at 273.15 K temperature and 1013.25 hPa pressure.

- 1
+ m-1
- "Condensed water" means liquid and ice. "Volume fraction" is used in the construction volume_fraction_of_X_in_Y, where X is a material constituent of Y. When soil moisture equals or exceeds the critical point evapotranspiration takes place at the potential rate and is controlled by the ambient meteorological conditions (temperature, wind, relative humidity). Evapotranspiration is the sum of evaporation and plant transpiration. Potential evapotranspiration is the rate at which evapotranspiration would occur under ambient conditions from a uniformly vegetated area when the water supply is not limiting.
+ The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. Coefficients with canonical units of m2 s-1 i.e. multiplied by density have standard names with specific_ instead of volume_. A scattering_angle should not be specified with this quantity. The scattering/absorption/attenuation coefficient is assumed to be an integral over all wavelengths, unless a coordinate of radiation_wavelength is included to specify the wavelength. Radiative flux is the sum of shortwave and longwave radiative fluxes. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. Aerosol particles take up ambient water (a process known as hygroscopic growth) depending on the relative humidity and the composition of the particles. "Dry aerosol particles" means aerosol particles without any water uptake. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Pm10 aerosol particles" means atmospheric particulate compounds with an aerodynamic diameter of less than or equal to 10 micrometers.

- 1
+ m-1
- "Condensed water" means liquid and ice. "Volume fraction" is used in the construction volume_fraction_of_X_in_Y, where X is a material constituent of Y. The field capacity of soil is the maximum content of water it can retain against gravitational drainage.
+ The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. Coefficients with canonical units of m2 s-1 i.e. multiplied by density have standard names with specific_ instead of volume_. A scattering_angle should not be specified with this quantity. The scattering/absorption/attenuation coefficient is assumed to be an integral over all wavelengths, unless a coordinate of radiation_wavelength is included to specify the wavelength. Radiative flux is the sum of shortwave and longwave radiative fluxes. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. Aerosol particles take up ambient water (a process known as hygroscopic growth) depending on the relative humidity and the composition of the particles. "Dry aerosol particles" means aerosol particles without any water uptake. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Pm10 aerosol particles" means atmospheric particulate compounds with an aerodynamic diameter of less than or equal to 10 micrometers. "Standard_temperature_and_pressure" refer to a reference volume at 273.15 K temperature and 1013.25 hPa pressure.

- 1
+ m-1
- "Condensed water" means liquid and ice. "Volume fraction" is used in the construction volume_fraction_of_X_in_Y, where X is a material constituent of Y. The wilting point of soil is the water content below which plants cannot extract sufficient water to balance their loss through transpiration.
+ The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. Coefficients with canonical units of m2 s-1 i.e. multiplied by density have standard names with specific_ instead of volume_. A scattering_angle should not be specified with this quantity. The scattering/absorption/attenuation coefficient is assumed to be an integral over all wavelengths, unless a coordinate of radiation_wavelength is included to specify the wavelength. Radiative flux is the sum of shortwave and longwave radiative fluxes. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. To specify the relative humidity at which the sample was measured, provide a scalar coordinate variable with the standard name of "relative_humidity". The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Pm1 aerosol particles" means atmospheric particulate compounds with an aerodynamic diameter of less than or equal to 1 micrometer.

- 1
+ m-1
- "Volume fraction" is used in the construction volume_fraction_of_X_in_Y, where X is a material constituent of Y. "Condensed water" means liquid and ice. "Volume_fraction_of_condensed_water_in_soil_pores" is the ratio of the volume of condensed water in soil pores to the volume of the pores themselves.
+ The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. Coefficients with canonical units of m2 s-1 i.e. multiplied by density have standard names with specific_ instead of volume_. A scattering_angle should not be specified with this quantity. The scattering/absorption/attenuation coefficient is assumed to be an integral over all wavelengths, unless a coordinate of radiation_wavelength is included to specify the wavelength. Radiative flux is the sum of shortwave and longwave radiative fluxes. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. To specify the relative humidity at which the sample was measured, provide a scalar coordinate variable with the standard name of "relative_humidity". The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Pm1 aerosol particles" means atmospheric particulate compounds with an aerodynamic diameter of less than or equal to 1 micrometer. "Standard_temperature_and_pressure" refer to a reference volume at 273.15 K temperature and 1013.25 hPa pressure.

- 1
+ m-1
- "frozen_water" means ice. "Volume fraction" is used in the construction volume_fraction_of_X_in_Y, where X is a material constituent of Y.
+ The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. Coefficients with canonical units of m2 s-1 i.e. multiplied by density have standard names with specific_ instead of volume_. A scattering_angle should not be specified with this quantity. The scattering/absorption/attenuation coefficient is assumed to be an integral over all wavelengths, unless a coordinate of radiation_wavelength is included to specify the wavelength. Radiative flux is the sum of shortwave and longwave radiative fluxes. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. "Ambient aerosol particles" are aerosol particles that have taken up ambient water through hygroscopic growth. The extent of hygroscopic growth depends on the relative humidity and the composition of the particles. To specify the relative humidity at which the sample was measured, provide a scalar coordinate variable with the standard name of "relative_humidity". The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Pm1 aerosol particles" means atmospheric particulate compounds with an aerodynamic diameter of less than or equal to 1 micrometer.

- 1
+ m-1
- "Volume fraction" is used in the construction volume_fraction_of_X_in_Y, where X is a material constituent of Y.
+ The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. Coefficients with canonical units of m2 s-1 i.e. multiplied by density have standard names with specific_ instead of volume_. A scattering_angle should not be specified with this quantity. The scattering/absorption/attenuation coefficient is assumed to be an integral over all wavelengths, unless a coordinate of radiation_wavelength is included to specify the wavelength. Radiative flux is the sum of shortwave and longwave radiative fluxes. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. "Ambient aerosol particles" are aerosol particles that have taken up ambient water through hygroscopic growth. The extent of hygroscopic growth depends on the relative humidity and the composition of the particles. To specify the relative humidity at which the sample was measured, provide a scalar coordinate variable with the standard name of "relative_humidity". The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Pm1 aerosol particles" means atmospheric particulate compounds with an aerodynamic diameter of less than or equal to 1 micrometer. "Standard_temperature_and_pressure" refer to a reference volume at 273.15 K temperature and 1013.25 hPa pressure.

- 1
+ m-1
- "Volume fraction" is used in the construction volume_fraction_of_X_in_Y, where X is a material constituent of Y.
+ The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. Coefficients with canonical units of m2 s-1 i.e. multiplied by density have standard names with specific_ instead of volume_. A scattering_angle should not be specified with this quantity. The scattering/absorption/attenuation coefficient is assumed to be an integral over all wavelengths, unless a coordinate of radiation_wavelength is included to specify the wavelength. Radiative flux is the sum of shortwave and longwave radiative fluxes. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. "Dried_aerosol_particles" means that the aerosol sample has been dried from the ambient state before sizing, but that the dry state (relative humidity less than 40 per cent) has not necessarily been reached. To specify the relative humidity at which the sample was measured, provide a scalar coordinate variable with the standard name of "relative_humidity". The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Pm1 aerosol particles" means atmospheric particulate compounds with an aerodynamic diameter of less than or equal to 1 micrometer.

- 1
+ m-1
- "Volume fraction" is used in the construction volume_fraction_of_X_in_Y, where X is a material constituent of Y.
+ The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. Coefficients with canonical units of m2 s-1 i.e. multiplied by density have standard names with specific_ instead of volume_. A scattering_angle should not be specified with this quantity. The scattering/absorption/attenuation coefficient is assumed to be an integral over all wavelengths, unless a coordinate of radiation_wavelength is included to specify the wavelength. Radiative flux is the sum of shortwave and longwave radiative fluxes. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. "Dried_aerosol_particles" means that the aerosol sample has been dried from the ambient state before sizing, but that the dry state (relative humidity less than 40 per cent) has not necessarily been reached. To specify the relative humidity at which the sample was measured, provide a scalar coordinate variable with the standard name of "relative_humidity". The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Pm1 aerosol particles" means atmospheric particulate compounds with an aerodynamic diameter of less than or equal to 1 micrometer. "Standard_temperature_and_pressure" refer to a reference volume at 273.15 K temperature and 1013.25 hPa pressure.

- 1
+ m-1
- "ratio_of_X_to_Y" means X/Y. "stp" means standard temperature (0 degC) and pressure (101325 Pa).
+ The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. Coefficients with canonical units of m2 s-1 i.e. multiplied by density have standard names with specific_ instead of volume_. A scattering_angle should not be specified with this quantity. The scattering/absorption/attenuation coefficient is assumed to be an integral over all wavelengths, unless a coordinate of radiation_wavelength is included to specify the wavelength. Radiative flux is the sum of shortwave and longwave radiative fluxes. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. Aerosol particles take up ambient water (a process known as hygroscopic growth) depending on the relative humidity and the composition of the particles. "Dry aerosol particles" means aerosol particles without any water uptake. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Pm1 aerosol particles" means atmospheric particulate compounds with an aerodynamic diameter of less than or equal to 1 micrometer.

+ m-1
- Radiative flux is the sum of shortwave and longwave radiative fluxes. Scattering of radiation is its deflection from its incident path without loss of energy. The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. Coefficients with canonical units of m2 s-1 i.e. multiplied by density have standard names with "specific_" instead of "volume_". The scattering/absorption/attenuation coefficient is assumed to be an integral over all wavelengths unless a coordinate of "radiation_wavelength" or "radiation_frequency" is included to specify the wavelength. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. "Ambient_aerosol" means that the aerosol is measured or modelled at the ambient state of pressure, temperature and relative humidity that exists in its immediate environment. "Ambient aerosol particles" are aerosol particles that have taken up ambient water through hygroscopic growth. The extent of hygroscopic growth depends on the relative humidity and the composition of the particles. To specify the relative humidity at which the quantity described by the standard name applies, provide a scalar coordinate variable with the standard name of "relative_humidity".
+ The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. Coefficients with canonical units of m2 s-1 i.e. multiplied by density have standard names with specific_ instead of volume_. A scattering_angle should not be specified with this quantity. The scattering/absorption/attenuation coefficient is assumed to be an integral over all wavelengths, unless a coordinate of radiation_wavelength is included to specify the wavelength. Radiative flux is the sum of shortwave and longwave radiative fluxes. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. Aerosol particles take up ambient water (a process known as hygroscopic growth) depending on the relative humidity and the composition of the particles. "Dry aerosol particles" means aerosol particles without any water uptake. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Pm1 aerosol particles" means atmospheric particulate compounds with an aerodynamic diameter of less than or equal to 1 micrometer. "Standard_temperature_and_pressure" refer to a reference volume at 273.15 K temperature and 1013.25 hPa pressure.

+ m-1
- Radiative flux is the sum of shortwave and longwave radiative fluxes. Scattering of radiation is its deflection from its incident path without loss of energy. The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. Coefficients with canonical units of m2 s-1 i.e. multiplied by density have standard names with "specific_" instead of "volume_". The scattering/absorption/attenuation coefficient is assumed to be an integral over all wavelengths unless a coordinate of "radiation_wavelength" or "radiation_frequency" is included to specify the wavelength. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. "Dried_aerosol" means that the aerosol sample has been dried from the ambient state before sizing, but that the dry state (relative humidity less than 40 per cent) has not necessarily been reached. To specify the relative humidity at which the sample was measured, provide a scalar coordinate variable with the standard name of "relative_humidity".
+ The volume scattering coefficient is the fractional change of radiative flux per unit path length due to the stated process. Coefficients with canonical units of m2 s-1 i.e. multiplied by density have standard names with specific_ instead of volume_. A scattering_angle should not be specified with this quantity. The scattering/absorption/attenuation coefficient is assumed to be an integral over all wavelengths, unless a coordinate of radiation_wavelength is included to specify the wavelength. Radiative flux is the sum of shortwave and longwave radiative fluxes. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. To specify the relative humidity at which the sample was measured, provide a scalar coordinate variable with the standard name of "relative_humidity". The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Pm2p5 aerosol particles" means atmospheric particulate compounds with an aerodynamic diameter of less than or equal to 2.5 micrometers.

+ m-1
- Radiative flux is the sum of shortwave and longwave radiative fluxes. In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. Coefficients with canonical units of m2 s-1 i.e. multiplied by density have standard names with specific_ instead of volume_. The scattering/absorption/attenuation coefficient is assumed to be an integral over all wavelengths, unless a coordinate of radiation_wavelength is included to specify the wavelength. Scattering of radiation is its deflection from its incident path without loss of energy. The (range of) direction(s) of scattering can be specified by a coordinate of scattering_angle.
+ The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. Coefficients with canonical units of m2 s-1 i.e. multiplied by density have standard names with specific_ instead of volume_. A scattering_angle should not be specified with this quantity. The scattering/absorption/attenuation coefficient is assumed to be an integral over all wavelengths, unless a coordinate of radiation_wavelength is included to specify the wavelength. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. To specify the relative humidity at which the sample was measured, provide a scalar coordinate variable with the standard name of "relative_humidity". The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Pm2p5 aerosol particles" means atmospheric particulate compounds with an aerodynamic diameter of less than or equal to 2.5 micrometers. "Standard_temperature_and_pressure" refer to a reference volume at 273.15 K temperature and 1013.25 hPa pressure.

- m-1 sr-1
+ m-1
- Radiative flux is the sum of shortwave and longwave radiative fluxes. Scattering of radiation is its deflection from its incident path without loss of energy. The volume scattering function is the intensity (flux per unit solid angle) of scattered radiation per unit length of scattering medium, normalised by the incident radiation flux. The (range of) direction(s) of scattering can be specified by a coordinate of scattering_angle. A coordinate variable of radiation_wavelength or radiation_frequency can be specified to indicate that the scattering applies at specific wavelengths or frequencies. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. "Ambient_aerosol" means that the aerosol is measured or modelled at the ambient state of pressure, temperature and relative humidity that exists in its immediate environment. "Ambient aerosol particles" are aerosol particles that have taken up ambient water through hygroscopic growth. The extent of hygroscopic growth depends on the relative humidity and the composition of the particles. To specify the relative humidity and temperature at which the quantity described by the standard name applies, provide scalar coordinate variables with standard names of "relative_humidity" and "air_temperature".
+ The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. Coefficients with canonical units of m2 s-1 i.e. multiplied by density have standard names with specific_ instead of volume_. A scattering_angle should not be specified with this quantity. The scattering/absorption/attenuation coefficient is assumed to be an integral over all wavelengths, unless a coordinate of radiation_wavelength is included to specify the wavelength. Radiative flux is the sum of shortwave and longwave radiative fluxes. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. "Ambient aerosol particles" are aerosol particles that have taken up ambient water through hygroscopic growth. The extent of hygroscopic growth depends on the relative humidity and the composition of the particles. To specify the relative humidity at which the sample was measured, provide a scalar coordinate variable with the standard name of "relative_humidity". The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Pm2p5 aerosol particles" means atmospheric particulate compounds with an aerodynamic diameter of less than or equal to 2.5 micrometers.
- - m-1 sr-1 + + m-1 - Radiative flux is the sum of shortwave and longwave radiative fluxes. Scattering of radiation is its deflection from its incident path without loss of energy. The volume scattering function is the intensity (flux per unit solid angle) of scattered radiation per unit length of scattering medium, normalised by the incident radiation flux. The (range of) direction(s) of scattering can be specified by a coordinate of scattering_angle. A coordinate variable of radiation_wavelength or radiation_frequency can be specified to indicate that the scattering applies at specific wavelengths or frequencies. + The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. Coefficients with canonical units of m2 s-1 i.e. multiplied by density have standard names with specific_ instead of volume_. A scattering_angle should not be specified with this quantity. The scattering/absorption/attenuation coefficient is assumed to be an integral over all wavelengths, unless a coordinate of radiation_wavelength is included to specify the wavelength. Radiative flux is the sum of shortwave and longwave radiative fluxes. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. "Ambient aerosol particles" are aerosol particles that have taken up ambient water through hygroscopic growth. The extent of hygroscopic growth depends on the relative humidity and the composition of the particles. To specify the relative humidity at which the sample was measured, provide a scalar coordinate variable with the standard name of "relative_humidity". The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Pm2p5 aerosol particles" means atmospheric particulate compounds with an aerodynamic diameter of less than or equal to 2.5 micrometers. "Standard_temperature_and_pressure" refer to a reference volume at 273.15 K temperature and 1013.25 hPa pressure. - - kg m-2 - 57 + + m-1 + - "Amount" means mass per unit area. "Water" means water in all phases. Evaporation is the conversion of liquid or solid into vapor. (The conversion of solid alone into vapor is called "sublimation".) + The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. Coefficients with canonical units of m2 s-1 i.e. multiplied by density have standard names with specific_ instead of volume_. A scattering_angle should not be specified with this quantity. The scattering/absorption/attenuation coefficient is assumed to be an integral over all wavelengths, unless a coordinate of radiation_wavelength is included to specify the wavelength. Radiative flux is the sum of shortwave and longwave radiative fluxes. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. "Dried_aerosol_particles" means that the aerosol sample has been dried from the ambient state before sizing, but that the dry state (relative humidity less than 40 per cent) has not necessarily been reached. To specify the relative humidity at which the sample was measured, provide a scalar coordinate variable with the standard name of "relative_humidity". 
The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Pm2p5 aerosol particles" means atmospheric particulate compounds with an aerodynamic diameter of less than or equal to 2.5 micrometers. - - kg m-2 + + m-1 - "Amount" means mass per unit area. "Water" means water in all phases. Evaporation is the conversion of liquid or solid into vapor. (The conversion of solid alone into vapor is called "sublimation".) "Canopy" means the vegetative covering over a surface. The canopy is often considered to be the outer surfaces of the vegetation. Plant height and the distribution, orientation and shape of plant leaves within a canopy influence the atmospheric environment and many plant processes within the canopy. Reference: AMS Glossary http://glossary.ametsoc.org/wiki/Canopy. Unless indicated in the cell_methods attribute, a quantity is assumed to apply to the whole area of each horizontal grid box. Previously, the qualifier "where_type" was used to specify that the quantity applies only to the part of the grid box of the named type. Names containing the where_type qualifier are deprecated and newly created data should use the cell_methods attribute to indicate the horizontal area to which the quantity applies. + The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. Coefficients with canonical units of m2 s-1 i.e. multiplied by density have standard names with specific_ instead of volume_. A scattering_angle should not be specified with this quantity. The scattering/absorption/attenuation coefficient is assumed to be an integral over all wavelengths, unless a coordinate of radiation_wavelength is included to specify the wavelength. Radiative flux is the sum of shortwave and longwave radiative fluxes. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. "Dried_aerosol_particles" means that the aerosol sample has been dried from the ambient state before sizing, but that the dry state (relative humidity less than 40 per cent) has not necessarily been reached. To specify the relative humidity at which the sample was measured, provide a scalar coordinate variable with the standard name of "relative_humidity". The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Pm2p5 aerosol particles" means atmospheric particulate compounds with an aerodynamic diameter of less than or equal to 2.5 micrometers. "Standard_temperature_and_pressure" refer to a reference volume at 273.15 K temperature and 1013.25 hPa pressure. - - kg m-2 s-1 + + m-1 - "Water" means water in all phases. Evaporation is the conversion of liquid or solid into vapor. (The conversion of solid alone into vapor is called "sublimation".) In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. Unless indicated in the cell_methods attribute, a quantity is assumed to apply to the whole area of each horizontal grid box. Previously, the qualifier where_type was used to specify that the quantity applies only to the part of the grid box of the named type. 
Names containing the where_type qualifier are deprecated and newly created data should use the cell_methods attribute to indicate the horizontal area to which the quantity applies."Canopy" means the vegetative covering over a surface. The canopy is often considered to be the outer surfaces of the vegetation. Plant height and the distribution, orientation and shape of plant leaves within a canopy influence the atmospheric environment and many plant processes within the canopy. Reference: AMS Glossary http://glossary.ametsoc.org/wiki/Canopy. + The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. Coefficients with canonical units of m2 s-1 i.e. multiplied by density have standard names with specific_ instead of volume_. A scattering_angle should not be specified with this quantity. The scattering/absorption/attenuation coefficient is assumed to be an integral over all wavelengths, unless a coordinate of radiation_wavelength is included to specify the wavelength. Radiative flux is the sum of shortwave and longwave radiative fluxes. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. Aerosol particles take up ambient water (a process known as hygroscopic growth) depending on the relative humidity and the composition of the particles. "Dry aerosol particles" means aerosol particles without any water uptake. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Pm2p5 aerosol particles" means atmospheric particulate compounds with an aerodynamic diameter of less than or equal to 2.5 micrometers. - - kg m-2 s-1 + + m-1 - "Water" means water in all phases. Evaporation is the conversion of liquid or solid into vapor. (The conversion of solid alone into vapor is called "sublimation".) In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. + The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. Coefficients with canonical units of m2 s-1 i.e. multiplied by density have standard names with specific_ instead of volume_. A scattering_angle should not be specified with this quantity. The scattering/absorption/attenuation coefficient is assumed to be an integral over all wavelengths, unless a coordinate of radiation_wavelength is included to specify the wavelength. Radiative flux is the sum of shortwave and longwave radiative fluxes. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. Aerosol particles take up ambient water (a process known as hygroscopic growth) depending on the relative humidity and the composition of the particles. "Dry aerosol particles" means aerosol particles without any water uptake. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Pm2p5 aerosol particles" means atmospheric particulate compounds with an aerodynamic diameter of less than or equal to 2.5 micrometers. 
"Standard_temperature_and_pressure" refer to a reference volume at 273.15 K temperature and 1013.25 hPa pressure. - - kg m-2 s-1 + + m-1 - evspsbl - Water means water in all phases. "Evapotranspiration" means all water vapor fluxes into the atmosphere from the surface: liquid evaporation, sublimation and transpiration. Evaporation is the conversion of liquid or solid into vapor. Transpiration is the process by which water is carried from the roots of plants and evaporates from the stomata. (The conversion of solid alone into vapor is called "sublimation".) In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. Unless indicated in the cell_methods attribute, a quantity is assumed to apply to the whole area of each horizontal grid box. + + Radiative flux is the sum of shortwave and longwave radiative fluxes. In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. Coefficients with canonical units of m2 s-1 i.e. multiplied by density have standard names with specific_ instead of volume_. The scattering/absorption/attenuation coefficient is assumed to be an integral over all wavelengths, unless a coordinate of radiation_wavelength is included to specify the wavelength. - - kg m-2 s-1 + + m-1 - "Water" means water in all phases. Flux correction is also called "flux adjustment". A positive flux correction is downward i.e. added to the ocean. In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. + The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. Radiative flux is the sum of shortwave and longwave radiative fluxes. In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. Coefficients with canonical units of m2 s-1 i.e. multiplied by density have standard names with specific_ instead of volume_. The scattering/absorption/attenuation coefficient is assumed to be an integral over all wavelengths, unless a coordinate of radiation_wavelength is included to specify the wavelength. - - kg m-2 s-1 + + m-1 sr-1 - wfo - "Water" means water in all phases. The water flux into sea water is the freshwater entering as a result of precipitation, evaporation, river inflow, sea ice effects and water flux relaxation and correction (if applied). In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. + + Attenuation is the sum of absorption and scattering. Attenuation is sometimes called "extinction". The attenuated backwards scattering function includes the effects of two-way attenuation by the medium between a radar source and receiver. The volume scattering function is the fraction of incident radiative flux scattered into unit solid angle per unit path length. Backwards scattering refers to the sum of scattering into all backward angles i.e. scattering_angle exceeding pi/2 radians. 
- kg m-2 s-1
- The water flux into sea water is the freshwater entering as a result of precipitation, evaporation, river inflow, sea ice effects and water flux correction (if applied). "Sea ice thermodynamics" refers to the addition or subtraction of mass due to surface and basal fluxes, i.e. due to melting, sublimation and fusion. "Sea ice" means all ice floating in the sea which has formed from freezing sea water, rather than by other processes such as calving of land ice to form icebergs.
+ m-1 sr-1
+ The attenuated backwards scattering coefficient includes the effects of two-way attenuation by the medium between a radar source and receiver. The volume scattering coefficient is the fraction of incident radiative flux scattered into unit solid angle per unit path length. The scattering coefficient is assumed to be an integral over all wavelengths unless a coordinate of "radiation_wavelength" or "radiation_frequency" is included to specify the wavelength. A phrase "assuming_condition" indicates that the named quantity is the value which would obtain if all aspects of the system were unaltered except for the assumption of the circumstances specified by the condition.
- kg m-2 s-1
- The water flux into the ocean is the freshwater entering the sea water as a result of precipitation, evaporation, river inflow, sea ice effects and water flux correction (if applied). "Surface drainage" refers to all melt water forming at the sea ice surface and subsequently running into the sea.
+ m-1
+ Downwelling radiation is radiation from above; it does not mean "net downward". The sign convention is that "upwelling" is positive upwards and "downwelling" is positive downwards. When thought of as being incident on a surface, a radiative flux is sometimes called "irradiance"; it is identical with the quantity measured by a cosine-collector light-meter and sometimes called "vector irradiance". Also called "diffuse" attenuation, the attenuation of downwelling radiative flux refers to the decrease with decreasing height or increasing depth of the downwelling component of radiative flux, regardless of incident direction.
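The "diffuse" attenuation wording above implies a near-exponential decay of downwelling flux with depth; a sketch, assuming hypothetical fluxes at two depths, of estimating the coefficient:

```python
import numpy as np

# Downwelling radiative flux measured at two depths (illustrative values).
z1, z2 = 5.0, 15.0        # depth below the surface [m]
f1, f2 = 120.0, 45.0      # downwelling flux [W m-2]

# For F(z) = F(z1) * exp(-K * (z - z1)), the attenuation coefficient is:
k = -np.log(f2 / f1) / (z2 - z1)   # [m-1]
print(f"diffuse attenuation coefficient: {k:.3f} m-1")
```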
- kg m-2 s-1
- The water flux into sea water is the freshwater entering as a result of precipitation, evaporation, river inflow, sea ice effects and water flux correction (if applied). The water flux into sea water from icebergs is due to the melting of the iceberg.
+ m-1 sr-1
+ Volume backwards scattering coefficient by ranging instrument is the fraction of radiative flux, per unit path length and per unit solid angle, scattered at a 180 degree angle with respect to the incident radiation and obtained through ranging techniques such as lidar and radar. The backwards scattering coefficient is assumed to relate to the same wavelength as the incident radiation. "Ambient_aerosol" means that the aerosol is measured or modelled at the ambient state of pressure, temperature and relative humidity that exists in its immediate environment. "Ambient aerosol particles" are aerosol particles that have taken up ambient water through hygroscopic growth.
- kg m-2 s-1
- "Land ice" means glaciers, ice-caps and ice-sheets resting on bedrock and also includes ice-shelves. The water flux into sea water from land ice is the freshwater entering the ocean as a result of runoff from the surface and base of the ice and melting from the ice shelf base and vertical ice front. For an area-average, the cell_methods attribute should be used to specify whether the average is over the area of the whole grid cell or the area of the ocean portion only.
+ m-1
+ The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. Backwards scattering refers to the sum of scattering into all backward angles, i.e. scattering_angle exceeds pi/2 radians. To specify the relative humidity at which the sample was measured, provide a scalar coordinate variable with the standard name of "relative_humidity".
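Several of the added descriptions ask for the measurement relative humidity as a scalar coordinate; a minimal iris sketch, assuming an iris standard-name table that already contains the names added by this diff:

```python
import numpy as np
import iris
from iris.coords import AuxCoord

cube = iris.cube.Cube(
    np.array([1.0e-5]),
    standard_name="volume_scattering_coefficient_in_air_due_to_ambient_aerosol_particles",
    units="m-1",
)
# Record the relative humidity at which the sample was measured
# as a scalar auxiliary coordinate.
cube.add_aux_coord(AuxCoord(40.0, standard_name="relative_humidity", units="%"))
```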
"Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. To specify the relative humidity at which the sample was measured, provide a scalar coordinate variable with the standard name of "relative_humidity". The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. - - kg m-2 s-1 + + m-1 - "Water" means water in all phases. The water flux or volume transport into sea water from rivers is the inflow to the ocean, often applied to the surface in ocean models. In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. "River" refers to water in the fluvial system (stream and floodplain). + The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. Coefficients with canonical units of m2 s-1 i.e. multiplied by density have standard names with specific_ instead of volume_. Backwards scattering refers to the sum of scattering into all backward angles i.e. scattering_angle exceeds pi/2 radians. A scattering_angle should not be specified with this quantity. The scattering/absorption/attenuation coefficient is assumed to be an integral over all wavelengths, unless a coordinate of radiation_wavelength is included to specify the wavelength. Radiative flux is the sum of shortwave and longwave radiative fluxes. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. To specify the relative humidity at which the sample was measured, provide a scalar coordinate variable with the standard name of "relative_humidity". The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Standard_temperature_and_pressure" refer to a reference volume at 273.15 K temperature and 1013.25 hPa pressure. - - kg m-2 s-1 + + m-1 - "Water" means water in all phases, including frozen i.e. ice and snow. The surface called "surface" means the lower boundary of the atmosphere. "Downward" indicates a vector component which is positive when directed downward (negative upward). The surface water flux is the result of precipitation and evaporation. The water flux into sea water is the freshwater entering as a result of precipitation, evaporation, river inflow, sea ice effects and water flux correction (if applied). The water flux or volume transport into sea water from rivers is the inflow to the ocean, often applied to the surface in ocean models. "River" refers to water in the fluvial system (stream and floodplain). In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. + The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. Coefficients with canonical units of m2 s-1 i.e. multiplied by density have standard names with specific_ instead of volume_. Backwards scattering refers to the sum of scattering into all backward angles i.e. scattering_angle exceeds pi/2 radians. A scattering_angle should not be specified with this quantity. 
The scattering/absorption/attenuation coefficient is assumed to be an integral over all wavelengths, unless a coordinate of radiation_wavelength is included to specify the wavelength. Radiative flux is the sum of shortwave and longwave radiative fluxes. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. "Ambient aerosol particles" are aerosol particles that have taken up ambient water through hygroscopic growth. The extent of hygroscopic growth depends on the relative humidity and the composition of the particles. To specify the relative humidity at which the sample was measured, provide a scalar coordinate variable with the standard name of "relative_humidity". The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. - - kg m-2 s-1 + + m-1 - Water means water in all phases. The water_flux_into_sea_water_without_flux_correction is the freshwater entering as a result of precipitation, evaporation, river inflow and sea ice effects. The total water flux including any flux relaxation(s) or correction(s) is described by the standard name water_flux_into_sea_water. In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. + The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. Coefficients with canonical units of m2 s-1 i.e. multiplied by density have standard names with specific_ instead of volume_. Backwards scattering refers to the sum of scattering into all backward angles i.e. scattering_angle exceeds pi/2 radians. A scattering_angle should not be specified with this quantity. The scattering/absorption/attenuation coefficient is assumed to be an integral over all wavelengths, unless a coordinate of radiation_wavelength is included to specify the wavelength. Radiative flux is the sum of shortwave and longwave radiative fluxes. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. "Ambient aerosol particles" are aerosol particles that have taken up ambient water through hygroscopic growth. The extent of hygroscopic growth depends on the relative humidity and the composition of the particles. To specify the relative humidity at which the sample was measured, provide a scalar coordinate variable with the standard name of "relative_humidity". The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Standard_temperature_and_pressure" refer to a reference volume at 273.15 K temperature and 1013.25 hPa pressure. - - kg m-2 s-1 + + m-1 - "Water" means water in all phases. The water_flux_out_of_sea_ice_and_sea_water is the freshwater leaving the ocean as a result of precipitation, evaporation, river outflow and any water flux relaxation(s) and correction(s) that may have been applied. In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. "Sea ice" means all ice floating in the sea which has formed from freezing sea water, rather than by other processes such as calving of land ice to form icebergs. 
+ The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. Coefficients with canonical units of m2 s-1 i.e. multiplied by density have standard names with specific_ instead of volume_. Backwards scattering refers to the sum of scattering into all backward angles i.e. scattering_angle exceeds pi/2 radians. A scattering_angle should not be specified with this quantity. The scattering/absorption/attenuation coefficient is assumed to be an integral over all wavelengths, unless a coordinate of radiation_wavelength is included to specify the wavelength. Radiative flux is the sum of shortwave and longwave radiative fluxes. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. "Dried_aerosol" means that the aerosol sample has been dried from the ambient state, but that the dry state (relative humidity less than 40 per cent) has not necessarily been reached. To specify the relative humidity at which the sample was measured, provide a scalar coordinate variable with the standard name of "relative_humidity". The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. - - kg m-2 s-1 + + m-1 - The quantity water_flux_out_of_sea_water is the quantity with standard name water_flux_into_sea_water multiplied by -1. "Water" means water in all phases. The water flux out of sea water is the freshwater leaving as a result of precipitation, evaporation, river outflow, sea-ice and any water flux relaxation(s) and correction(s) that may have been applied. In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. + The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. Coefficients with canonical units of m2 s-1 i.e. multiplied by density have standard names with specific_ instead of volume_. Backwards scattering refers to the sum of scattering into all backward angles i.e. scattering_angle exceeds pi/2 radians. A scattering_angle should not be specified with this quantity. The scattering/absorption/attenuation coefficient is assumed to be an integral over all wavelengths, unless a coordinate of radiation_wavelength is included to specify the wavelength. Radiative flux is the sum of shortwave and longwave radiative fluxes. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. "Dried_aerosol_particles" means that the aerosol sample has been dried from the ambient state before sizing, but that the dry state (relative humidity less than 40 per cent) has not necessarily been reached. To specify the relative humidity at which the sample was measured, provide a scalar coordinate variable with the standard name of "relative_humidity". The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Standard_temperature_and_pressure" refer to a reference volume at 273.15 K temperature and 1013.25 hPa pressure. 
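The removed description defines water_flux_out_of_sea_water as the into-ocean flux multiplied by -1; a minimal iris sketch of that sign flip (data values illustrative):

```python
import numpy as np
import iris

influx = iris.cube.Cube(
    np.array([[2.5e-5]]),
    standard_name="water_flux_into_sea_water",
    units="kg m-2 s-1",
)
# The outgoing flux is defined as the incoming flux multiplied by -1.
outflux = influx * -1
outflux.rename("water_flux_out_of_sea_water")
```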
- kg m-2 s-1
- The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. The water_flux_out_of_sea_water_due_to_newtonian_relaxation is the freshwater leaving as a result of the Newtonian relaxation of the sea surface salinity.
+ m-1
+ The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. Aerosol particles take up ambient water (a process known as hygroscopic growth) depending on the relative humidity and the composition of the particles; "dry aerosol particles" means aerosol particles without any water uptake.
- kg m-2 s-1
- The water flux out of sea water is the freshwater leaving the sea water. "Sea ice thermodynamics" refers to the addition or subtraction of sea ice mass due to surface and basal fluxes, i.e. due to melting, sublimation and fusion.
+ m-1
+ The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. "Dry aerosol particles" means aerosol particles without any water uptake. "Standard_temperature_and_pressure" refer to a reference volume at 273.15 K temperature and 1013.25 hPa pressure.
- kg m-2
- "Amount" means mass per unit area. "Water" means water in all phases. Evaporation is the conversion of liquid or solid into vapor. Potential evaporation is the rate at which evaporation would take place under unaltered ambient conditions (temperature, relative humidity, wind, etc.) if the supply of water were unlimited, as if from an open water surface.
+ m-1
+ The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. To specify the relative humidity at which the sample was measured, provide a scalar coordinate variable with the standard name of "relative_humidity". "Pm10 aerosol particles" means atmospheric particulate compounds with an aerodynamic diameter of less than or equal to 10 micrometers.
- kg m-2 s-1
- "Water" means water in all phases. Potential evaporation is the rate at which evaporation would take place under unaltered ambient conditions (temperature, relative humidity, wind, etc.) if the supply of water were unlimited, as if from an open water surface. In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics.
+ m-1
+ The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. "Pm10 aerosol particles" means atmospheric particulate compounds with an aerodynamic diameter of less than or equal to 10 micrometers. "Standard_temperature_and_pressure" refer to a reference volume at 273.15 K temperature and 1013.25 hPa pressure.
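The removed entries distinguish an "amount" (kg m-2, mass per unit area) from a "flux" (kg m-2 s-1); under the assumption of regularly sampled fluxes, the amount is the time integral of the flux, as in this sketch with illustrative values:

```python
import numpy as np

# Potential evaporation flux sampled hourly over one day (illustrative values).
flux = np.full(24, 3.0e-5)      # kg m-2 s-1
dt = 3600.0                     # seconds per sample

# An "amount" (kg m-2) is the time integral of the corresponding flux.
amount = np.sum(flux * dt)      # kg m-2
print(f"potential evaporation amount: {amount:.2f} kg m-2")
```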
- kg m-2 s-1
- "Water" means water in all phases. Sublimation is the conversion of solid into vapor. In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics.
+ m-1
+ The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. "Ambient_aerosol" means that the aerosol is measured or modelled at the ambient state of pressure, temperature and relative humidity that exists in its immediate environment. "Pm10 aerosol particles" means atmospheric particulate compounds with an aerodynamic diameter of less than or equal to 10 micrometers.
- m
- "Water surface height above reference datum" means the height of the upper surface of a body of liquid water, such as sea, lake or river, above an arbitrary reference datum. The altitude of the datum should be provided in a variable with standard name water_surface_reference_datum_altitude. The surface called "surface" means the lower boundary of the atmosphere.
+ m-1
+ The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. "Ambient aerosol particles" are aerosol particles that have taken up ambient water through hygroscopic growth. "Pm10 aerosol particles" means atmospheric particulate compounds with an aerodynamic diameter of less than or equal to 10 micrometers. "Standard_temperature_and_pressure" refer to a reference volume at 273.15 K temperature and 1013.25 hPa pressure.
- m
- Altitude is the (geometric) height above the geoid, which is the reference geopotential surface; the geoid is similar to mean sea level. "Water surface reference datum altitude" means the altitude of the arbitrary datum referred to by a quantity with standard name "water_surface_height_above_reference_datum".
+ m-1
+ The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. "Dried_aerosol_particles" means that the aerosol sample has been dried from the ambient state before sizing, but that the dry state (relative humidity less than 40 per cent) has not necessarily been reached. "Pm10 aerosol particles" means atmospheric particulate compounds with an aerodynamic diameter of less than or equal to 10 micrometers.
- m
- Depth is the vertical distance below the surface. The water table is the surface below which the soil is saturated with water such that all pore spaces are filled.
+ m-1
+ The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. "Dried_aerosol_particles" means that the aerosol sample has been dried from the ambient state before sizing. "Pm10 aerosol particles" means atmospheric particulate compounds with an aerodynamic diameter of less than or equal to 10 micrometers. "Standard_temperature_and_pressure" refer to a reference volume at 273.15 K temperature and 1013.25 hPa pressure.
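The removed surface-height and datum-altitude descriptions compose naturally: the altitude of the water surface is the datum altitude plus the height above the datum. A sketch with hypothetical values:

```python
import numpy as np

datum_altitude = 110.0                       # water_surface_reference_datum_altitude [m]
height_above_datum = np.array([1.2, 1.5])    # water_surface_height_above_reference_datum [m]

# Altitude (height above the geoid) of the water surface itself:
water_surface_altitude = datum_altitude + height_above_datum
```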
- Pa
- 55
- The partial pressure of a gaseous constituent of air is the pressure which it alone would exert with unchanged temperature and number of moles per unit volume.
+ m-1
+ The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. "Dry aerosol particles" means aerosol particles without any water uptake. "Pm10 aerosol particles" means atmospheric particulate compounds with an aerodynamic diameter of less than or equal to 10 micrometers.
- Pa
- 56
- Water vapor saturation deficit is the difference between the saturation water vapor partial pressure and the actual water vapor partial pressure in air.
+ m-1
+ The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. "Dry aerosol particles" means aerosol particles without any water uptake. "Pm10 aerosol particles" means atmospheric particulate compounds with an aerodynamic diameter of less than or equal to 10 micrometers. "Standard_temperature_and_pressure" refer to a reference volume at 273.15 K temperature and 1013.25 hPa pressure.
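The removed description defines the saturation deficit as saturation partial pressure minus actual partial pressure; a sketch using the Magnus approximation for the saturation pressure (the formula and its constants are an assumption brought in for illustration, not part of this table):

```python
import numpy as np

def saturation_vapor_pressure(t_celsius):
    """Magnus approximation to saturation vapor pressure over water [Pa]."""
    return 611.2 * np.exp(17.62 * t_celsius / (243.12 + t_celsius))

t = 20.0          # air temperature [degC]
e = 1400.0        # actual water vapor partial pressure [Pa]

# Saturation deficit: saturation partial pressure minus actual partial pressure.
deficit = saturation_vapor_pressure(t) - e
print(f"water vapor saturation deficit: {deficit:.0f} Pa")
```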
- m3 s-1
- The water flux or volume transport in rivers is the amount of water flowing in the river channel and flood plain. "Water" means water in all phases.
+ m-1
+ The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. To specify the relative humidity at which the sample was measured, provide a scalar coordinate variable with the standard name of "relative_humidity". "Pm1 aerosol particles" means atmospheric particulate compounds with an aerodynamic diameter of less than or equal to 1 micrometer.
- m3 s-1
- "Water" means water in all phases. The water flux or volume transport into sea water from rivers is the inflow to the ocean, often applied to the surface in ocean models. "River" refers to water in the fluvial system (stream and floodplain).
+ m-1
+ The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. "Pm1 aerosol particles" means atmospheric particulate compounds with an aerodynamic diameter of less than or equal to 1 micrometer. "Standard_temperature_and_pressure" refer to a reference volume at 273.15 K temperature and 1013.25 hPa pressure.
- s-1
- Frequency is the number of oscillations of a wave per unit time.
+ m-1
+ The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. "Ambient aerosol particles" are aerosol particles that have taken up ambient water through hygroscopic growth. "Pm1 aerosol particles" means atmospheric particulate compounds with an aerodynamic diameter of less than or equal to 1 micrometer.
- s-2
- A quantity with standard name Xward_Yward_derivative_of_geopotential is a second spatial derivative of geopotential, P, in the direction specified by X and Y, i.e. d2P/dXdY. Geopotential is the sum of the specific gravitational potential energy relative to the geoid and the specific centripetal potential energy. "Westward" indicates a vector component which is positive when directed westward (negative eastward); "upward" indicates a vector component which is positive when directed upward (negative downward). "component_derivative_of_X" means derivative of X with respect to distance in the component direction, which may be "northward", "southward", "eastward", "westward", "x" or "y"; the last two indicate derivatives along the axes of the grid, in the case where they are not true longitude and latitude.
+ m-1
+ The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. "Ambient aerosol particles" are aerosol particles that have taken up ambient water through hygroscopic growth. "Pm1 aerosol particles" means atmospheric particulate compounds with an aerodynamic diameter of less than or equal to 1 micrometer. "Standard_temperature_and_pressure" refer to a reference volume at 273.15 K temperature and 1013.25 hPa pressure.
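The removed Xward_Yward_derivative_of_geopotential entries define the quantity as d2P/dXdY; a finite-difference sketch on a regular grid (field and spacings illustrative):

```python
import numpy as np

# Geopotential P on a regular (y, x) grid [m2 s-2]; spacings in metres.
y = np.linspace(0.0, 4.0e5, 5)
x = np.linspace(0.0, 4.0e5, 5)
p = np.add.outer(y**2, x**2) * 1.0e-7

# d2P/dXdY computed as repeated first derivatives along each axis.
dp_dx = np.gradient(p, x, axis=1)
d2p_dxdy = np.gradient(dp_dx, y, axis=0)   # [s-2]
```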
- s-2
- A quantity with standard name Xward_Yward_derivative_of_geopotential is a second spatial derivative of geopotential, P, in the direction specified by X and Y, i.e. d2P/dXdY. Geopotential is the sum of the specific gravitational potential energy relative to the geoid and the specific centripetal potential energy. "Westward" indicates a vector component which is positive when directed westward (negative eastward). "component_derivative_of_X" means derivative of X with respect to distance in the component direction, which may be "northward", "southward", "eastward", "westward", "x" or "y".
+ m-1
+ The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. "Dried_aerosol_particles" means that the aerosol sample has been dried from the ambient state before sizing, but that the dry state (relative humidity less than 40 per cent) has not necessarily been reached. To specify the relative humidity at which the sample was measured, provide a scalar coordinate variable with the standard name of "relative_humidity". "Pm1 aerosol particles" means atmospheric particulate compounds with an aerodynamic diameter of less than or equal to 1 micrometer.
- K
- Wet bulb potential temperature is the temperature a parcel of air would have if moved dry adiabatically until it reaches saturation and thereafter moist adiabatically to sea level pressure.
+ m-1
+ The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. "Dried_aerosol_particles" means that the aerosol sample has been dried from the ambient state before sizing. "Pm1 aerosol particles" means atmospheric particulate compounds with an aerodynamic diameter of less than or equal to 1 micrometer. "Standard_temperature_and_pressure" refer to a reference volume at 273.15 K temperature and 1013.25 hPa pressure.
- K
-
+ m-1
+ The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. Aerosol particles take up ambient water (a process known as hygroscopic growth) depending on the relative humidity and the composition of the particles; "dry aerosol particles" means aerosol particles without any water uptake. "Pm1 aerosol particles" means atmospheric particulate compounds with an aerodynamic diameter of less than or equal to 1 micrometer.
- degree
- 31
- Wind is defined as a two-dimensional (horizontal) air velocity vector, with no vertical component. (Vertical motion in the atmosphere has the standard name upward_air_velocity.) In meteorological reports, the direction of the wind vector is usually (but not always) given as the direction from which it is blowing (wind_from_direction) (westerly, northerly, etc.). In other contexts, such as atmospheric modelling, it is often natural to give the direction in the usual manner of vectors as the heading or the direction to which it is blowing (wind_to_direction) (eastward, southward, etc.). "from_direction" is used in the construction X_from_direction and indicates the direction from which the velocity vector of X is coming.
+ m-1
+ The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. "Dry aerosol particles" means aerosol particles without any water uptake. "Pm1 aerosol particles" means atmospheric particulate compounds with an aerodynamic diameter of less than or equal to 1 micrometer. "Standard_temperature_and_pressure" refer to a reference volume at 273.15 K temperature and 1013.25 hPa pressure.
- degree
- The phrase "from_direction" is used in the construction X_from_direction and indicates the direction from which the velocity vector of X is coming. The direction is a bearing in the usual geographical sense, measured positive clockwise from due north. A gust is a sudden brief period of high wind speed. In an observed time series of wind speed, the gust wind speed can be indicated by a cell_methods of "maximum" for the time-interval. In an atmospheric model which has a parametrised calculation of gustiness, the gust wind speed may be separately diagnosed from the wind speed. Wind is defined as a two-dimensional (horizontal) air velocity vector, with no vertical component. (Vertical motion in the atmosphere has the standard name "upward_air_velocity".) In meteorological reports, the direction of the wind vector is usually (but not always) given as the direction from which it is blowing ("wind_from_direction") (westerly, northerly, etc.); in other contexts, such as atmospheric modelling, it is often natural to give the direction in the usual manner of vectors as the heading or the direction to which it is blowing ("wind_to_direction") (eastward, southward, etc.).
+ m-1
+ The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. To specify the relative humidity at which the sample was measured, provide a scalar coordinate variable with the standard name of "relative_humidity". "Pm1 aerosol particles" means atmospheric particulate compounds with an aerodynamic diameter of less than or equal to 1 micrometer.
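The wind-direction descriptions above use bearings measured clockwise from due north and distinguish the "from" and "to" conventions; a numpy sketch of the two conversions (component values illustrative):

```python
import numpy as np

# Convert between the "from" and "to" conventions (bearings in degrees).
wind_from_direction = np.array([0.0, 90.0, 225.0])
wind_to_direction = (wind_from_direction + 180.0) % 360.0

# Derive wind_from_direction from eastward (u) and northward (v) components.
u, v = 3.0, -4.0
from_direction = (270.0 - np.degrees(np.arctan2(v, u))) % 360.0
```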
- W m-2
- 126
- Wind is defined as a two-dimensional (horizontal) air velocity vector, with no vertical component. (Vertical motion in the atmosphere has the standard name upward_air_velocity.) In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics.
+ m-1
+ The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. To specify the relative humidity at which the sample was measured, provide a scalar coordinate variable with the standard name of "relative_humidity". "Pm2p5 aerosol particles" means atmospheric particulate compounds with an aerodynamic diameter of less than or equal to 2.5 micrometers. "Standard_temperature_and_pressure" refer to a reference volume at 273.15 K temperature and 1013.25 hPa pressure.
- m s-1
- 32
- Speed is the magnitude of velocity. Wind is defined as a two-dimensional (horizontal) air velocity vector, with no vertical component. (Vertical motion in the atmosphere has the standard name upward_air_velocity.) The wind speed is the magnitude of the wind velocity.
+ m-1
+ The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. "Ambient_aerosol" means that the aerosol is measured or modelled at the ambient state of pressure, temperature and relative humidity that exists in its immediate environment. To specify the relative humidity at which the sample was measured, provide a scalar coordinate variable with the standard name of "relative_humidity". "Pm2p5 aerosol particles" means atmospheric particulate compounds with an aerodynamic diameter of less than or equal to 2.5 micrometers.
- m s-1
- Speed is the magnitude of velocity. The wind speed is the magnitude of the wind velocity. A gust is a sudden brief period of high wind speed. In an observed timeseries of wind speed, the gust wind speed can be indicated by a cell_methods of maximum for the time-interval. In an atmospheric model which has a parametrised calculation of gustiness, the gust wind speed may be separately diagnosed from the wind speed.
+ m-1
+ The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. "Ambient aerosol particles" are aerosol particles that have taken up ambient water through hygroscopic growth. "Pm2p5 aerosol particles" means atmospheric particulate compounds with an aerodynamic diameter of less than or equal to 2.5 micrometers. "Standard_temperature_and_pressure" refer to a reference volume at 273.15 K temperature and 1013.25 hPa pressure.
"Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. "Ambient aerosol particles" are aerosol particles that have taken up ambient water through hygroscopic growth. The extent of hygroscopic growth depends on the relative humidity and the composition of the particles. To specify the relative humidity at which the sample was measured, provide a scalar coordinate variable with the standard name of "relative_humidity". The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Pm2p5 aerosol particles" means atmospheric particulate compounds with an aerodynamic diameter of less than or equal to 2.5 micrometers. "Standard_temperature_and_pressure" refer to a reference volume at 273.15 K temperature and 1013.25 hPa pressure. - - s-1 - N136 + + m-1 + - Speed is the magnitude of velocity. Wind is defined as a two-dimensional (horizontal) air velocity vector, with no vertical component. (Vertical motion in the atmosphere has the standard name upward_air_velocity.) The wind speed is the magnitude of the wind velocity. Wind speed shear is the derivative of wind speed with respect to height. + The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. Coefficients with canonical units of m2 s-1 i.e. multiplied by density have standard names with specific_ instead of volume_. Backwards scattering refers to the sum of scattering into all backward angles i.e. scattering_angle exceeds pi/2 radians. A scattering_angle should not be specified with this quantity. The scattering/absorption/attenuation coefficient is assumed to be an integral over all wavelengths, unless a coordinate of radiation_wavelength is included to specify the wavelength. Radiative flux is the sum of shortwave and longwave radiative fluxes. Radiative flux is the sum of shortwave and longwave radiative fluxes. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. "Dried_aerosol_particles" means that the aerosol sample has been dried from the ambient state before sizing, but that the dry state (relative humidity less than 40 per cent) has not necessarily been reached. To specify the relative humidity at which the sample was measured, provide a scalar coordinate variable with the standard name of "relative_humidity". The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Pm2p5 aerosol particles" means atmospheric particulate compounds with an aerodynamic diameter of less than or equal to 2.5 micrometers. - - degree + + m-1 - Wind is defined as a two-dimensional (horizontal) air velocity vector, with no vertical component. (Vertical motion in the atmosphere has the standard name upward_air_velocity.) In meteorological reports, the direction of the wind vector is usually (but not always) given as the direction from which it is blowing (wind_from_direction) (westerly, northerly, etc.). 
In other contexts, such as atmospheric modelling, it is often natural to give the direction in the usual manner of vectors as the heading or the direction to which it is blowing (wind_to_direction) (eastward, southward, etc.) "to_direction" is used in the construction X_to_direction and indicates the direction towards which the velocity vector of X is headed. The direction is a bearing in the usual geographical sense, measured positive clockwise from due north. + The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. Coefficients with canonical units of m2 s-1 i.e. multiplied by density have standard names with specific_ instead of volume_. Backwards scattering refers to the sum of scattering into all backward angles i.e. scattering_angle exceeds pi/2 radians. A scattering_angle should not be specified with this quantity. The scattering/absorption/attenuation coefficient is assumed to be an integral over all wavelengths, unless a coordinate of radiation_wavelength is included to specify the wavelength. Radiative flux is the sum of shortwave and longwave radiative fluxes. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. "Dried_aerosol_particles" means that the aerosol sample has been dried from the ambient state before sizing, but that the dry state (relative humidity less than 40 per cent) has not necessarily been reached. To specify the relative humidity at which the sample was measured, provide a scalar coordinate variable with the standard name of "relative_humidity". The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Pm2p5 aerosol particles" means atmospheric particulate compounds with an aerodynamic diameter of less than or equal to 2.5 micrometers. "Standard_temperature_and_pressure" refer to a reference volume at 273.15 K temperature and 1013.25 hPa pressure. - - kg m-2 + + m-1 - "Content" indicates a quantity per unit area. "Wood debris" means dead organic matter composed of coarse wood. It is distinct from fine litter. The precise distinction between "fine" and "coarse" is model dependent. + The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. Coefficients with canonical units of m2 s-1 i.e. multiplied by density have standard names with specific_ instead of volume_. Backwards scattering refers to the sum of scattering into all backward angles i.e. scattering_angle exceeds pi/2 radians. A scattering_angle should not be specified with this quantity. The scattering/absorption/attenuation coefficient is assumed to be an integral over all wavelengths, unless a coordinate of radiation_wavelength is included to specify the wavelength. Radiative flux is the sum of shortwave and longwave radiative fluxes. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. Aerosol particles take up ambient water (a process known as hygroscopic growth) depending on the relative humidity and the composition of the particles. "Dry aerosol particles" means aerosol particles without any water uptake. 
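The removed wind-direction entries above define both conventions: "from_direction" and "to_direction" are bearings measured positive clockwise from due north, and the wind itself is a two-dimensional horizontal vector. A hedged NumPy sketch of the usual meteorological conversion between speed plus wind_from_direction and eastward/northward components (a common convention, not text from the table):

```python
import numpy as np

def wind_components(speed, from_direction_deg):
    """Convert wind speed and wind_from_direction (a bearing in degrees,
    clockwise from due north) to eastward and northward components.
    A wind *from* the north (0 deg) blows *toward* the south."""
    theta = np.deg2rad(from_direction_deg)
    u = -speed * np.sin(theta)  # eastward_wind
    v = -speed * np.cos(theta)  # northward_wind
    return u, v

# A westerly wind (from_direction = 270 deg) has a positive eastward component.
print(wind_components(10.0, 270.0))  # -> (10.0, ~0.0)
```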
+ The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. Coefficients with canonical units of m2 s-1 i.e. multiplied by density have standard names with specific_ instead of volume_. Backwards scattering refers to the sum of scattering into all backward angles i.e. scattering_angle exceeds pi/2 radians. A scattering_angle should not be specified with this quantity. The scattering/absorption/attenuation coefficient is assumed to be an integral over all wavelengths, unless a coordinate of radiation_wavelength is included to specify the wavelength. Radiative flux is the sum of shortwave and longwave radiative fluxes. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. Aerosol particles take up ambient water (a process known as hygroscopic growth) depending on the relative humidity and the composition of the particles. "Dry aerosol particles" means aerosol particles without any water uptake. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Pm2p5 aerosol particles" means atmospheric particulate compounds with an aerodynamic diameter of less than or equal to 2.5 micrometers.
-
- kg m-2
+
+ m-1
- "Content" indicates a quantity per unit area. "Wood debris" means dead organic matter composed of coarse wood. It is distinct from fine litter. The precise distinction between "fine" and "coarse" is model dependent. The sum of the quantities with standard names wood_debris_mass_content_of_nitrogen, surface_litter_mass_content_of_nitrogen and subsurface_litter_mass_content_of_nitrogen is the total nitrogen mass content of dead plant material.
+ The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. Coefficients with canonical units of m2 s-1 i.e. multiplied by density have standard names with specific_ instead of volume_. Backwards scattering refers to the sum of scattering into all backward angles i.e. scattering_angle exceeds pi/2 radians. A scattering_angle should not be specified with this quantity. The scattering/absorption/attenuation coefficient is assumed to be an integral over all wavelengths, unless a coordinate of radiation_wavelength is included to specify the wavelength. Radiative flux is the sum of shortwave and longwave radiative fluxes. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. Aerosol particles take up ambient water (a process known as hygroscopic growth) depending on the relative humidity and the composition of the particles. "Dry aerosol particles" means aerosol particles without any water uptake. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Pm2p5 aerosol particles" means atmospheric particulate compounds with an aerodynamic diameter of less than or equal to 2.5 micrometers. "Standard_temperature_and_pressure" refer to a reference volume at 273.15 K temperature and 1013.25 hPa pressure.
-
- Pa m-1
+
+ m-1
- "component_derivative_of_X" means the derivative of X with respect to distance in the component direction, which may be northward, southward, eastward, westward, x or y. The last two indicate derivatives along the axes of the grid, whether or not they are true longitude and latitude. x_derivative_of_ocean_rigid_lid_pressure means (d/dx) of the ocean surface pressure, as derived by a rigid lid approximation, keeping the other horizontal coordinate (y, presumably) constant.
+ Radiative flux is the sum of shortwave and longwave radiative fluxes. In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. Coefficients with canonical units of m2 s-1 i.e. multiplied by density have standard names with specific_ instead of volume_. The scattering/absorption/attenuation coefficient is assumed to be an integral over all wavelengths, unless a coordinate of radiation_wavelength is included to specify the wavelength. Scattering of radiation is its deflection from its incident path without loss of energy. Backwards scattering refers to the sum of scattering into all backward angles i.e. scattering_angle exceeding pi/2 radians. A scattering_angle should not be specified with this quantity.
-
- W m-2
+
+ m-1
- "x" indicates a vector component along the grid x-axis, positive with increasing x. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics.
+ Radiative flux is the sum of shortwave and longwave radiative fluxes. In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. Coefficients with canonical units of m2 s-1 i.e. multiplied by density have standard names with specific_ instead of volume_. The scattering/absorption/attenuation coefficient is assumed to be an integral over all wavelengths, unless a coordinate of radiation_wavelength is included to specify the wavelength. Attenuation is the sum of absorption and scattering. Attenuation is sometimes called "extinction". Beam attenuation refers to the decrease of radiative flux along the direction of the incident path. It is distinguished from attenuation of the downwelling component of radiative flux from any incident direction, also called "diffuse" attenuation.
-
- m s-1
+
+ m-1
- "x" indicates a vector component along the grid x-axis, positive with increasing x. Wind is defined as a two-dimensional (horizontal) air velocity vector, with no vertical component. (Vertical motion in the atmosphere has the standard name upward_air_velocity.)
+ Radiative flux is the sum of shortwave and longwave radiative fluxes. In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length. The scattering/absorption/attenuation coefficient is assumed to be an integral over all wavelengths, unless a coordinate of radiation_wavelength is included to specify the wavelength. Attenuation is the sum of absorption and scattering. Attenuation is sometimes called "extinction". Beam attenuation refers to the decrease of radiative flux along the direction of the incident path. It is distinguished from attenuation of the downwelling component of radiative flux from any incident direction, also called "diffuse" attenuation. The phrase "corrected for pure water attenuance" means the attenuation coefficient has been adjusted/calibrated to remove the influence of absorption/scattering by the water itself. Coefficients with canonical units of m2 s-1 i.e. multiplied by density have standard names with specific_ instead of volume_.
-
- m s-1
+
+ 1
- "x" indicates a vector component along the grid x-axis, positive with increasing x. Wind is defined as a two-dimensional (horizontal) air velocity vector, with no vertical component. (Vertical motion in the atmosphere has the standard name upward_air_velocity.) A gust is a sudden brief period of high wind speed. In an observed time series of wind speed, the gust wind speed can be indicated by a cell_methods of maximum for the time-interval. In an atmospheric model which has a parametrised calculation of gustiness, the gust wind speed may be separately diagnosed from the wind speed.
+ The volume extinction Angstrom exponent is the Angstrom exponent obtained for the aerosol extinction instead of that for the aerosol optical thickness. It is alpha in the following equation relating aerosol extinction (ext) at the wavelength lambda to aerosol extinction at a different wavelength lambda0: ext(lambda) = ext(lambda0) * [lambda/lambda0] ** (-1 * alpha). "Ambient_aerosol" means that the aerosol is measured or modelled at the ambient state of pressure, temperature and relative humidity that exists in its immediate environment. "Ambient aerosol particles" are aerosol particles that have taken up ambient water through hygroscopic growth. The extent of hygroscopic growth depends on the relative humidity and the composition of the particles. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase.
-
- Pa m-1
+
+ m-1
- "component_derivative_of_X" means the derivative of X with respect to distance in the component direction, which may be northward, southward, eastward, westward, x or y. The last two indicate derivatives along the axes of the grid, whether or not they are true longitude and latitude. y_derivative_of_ocean_rigid_lid_pressure means (d/dy) of the ocean surface pressure, as derived by a rigid lid approximation, keeping the other horizontal coordinate (x, presumably) constant.
+ The volume extinction coefficient is the fractional change of radiative flux per unit path length. Extinction is the sum of absorption and scattering, sometimes called "attenuation". "Extinction" is the term most commonly used at optical wavelengths whereas "attenuation" is more often used at radio and radar wavelengths. Radiative flux is the sum of shortwave and longwave radiative fluxes. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. "Ambient_aerosol" means that the aerosol is measured or modelled at the ambient state of pressure, temperature and relative humidity that exists in its immediate environment. "Ambient aerosol particles" are aerosol particles that have taken up ambient water through hygroscopic growth. The extent of hygroscopic growth depends on the relative humidity and the composition of the particles. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase.
-
- W m-2
+
+ m-1
- "y" indicates a vector component along the grid y-axis, positive with increasing y. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics.
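The Angstrom-exponent entry above gives the relation ext(lambda) = ext(lambda0) * (lambda/lambda0) ** (-alpha). A small sketch applying it; the wavelengths and coefficient values below are illustrative only:

```python
def extinction_at(lam, lam0, ext0, alpha):
    """Angstrom relation: scale an extinction coefficient from
    wavelength lam0 to wavelength lam using exponent alpha."""
    return ext0 * (lam / lam0) ** (-alpha)

# Illustrative numbers: extinction 1.0e-4 m-1 at 550 nm, alpha = 1.3.
print(extinction_at(870e-9, 550e-9, 1.0e-4, 1.3))  # smaller at the longer wavelength
```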
"Extinction" is the term most commonly used at optical wavelengths whereas "attenuation" is more often used at radio and radar wavelengths. Radiative flux is the sum of shortwave and longwave radiative fluxes. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Cloud particles" means suspended liquid or ice water droplets. A coordinate of radiation_wavelength or radiation_frequency should be included to specify either the wavelength or frequency. - - m s-1 + + 1 - "y" indicates a vector component along the grid y-axis, positive with increasing y. Wind is defined as a two-dimensional (horizontal) air velocity vector, with no vertical component. (Vertical motion in the atmosphere has the standard name upward_air_velocity.) + "Volume fraction" is used in the construction volume_fraction_of_X_in_Y, where X is a material constituent of Y. It is evaluated as the volume of X divided by the volume of Y (including X). It may be expressed as a fraction, a percentage, or any other dimensionless representation of a fraction. - - m s-1 + + 1 - "y" indicates a vector component along the grid y-axis, positive with increasing y. Wind is defined as a two-dimensional (horizontal) air velocity vector, with no vertical component. (Vertical motion in the atmosphere has the standard name upward_air_velocity.) A gust is a sudden brief period of high wind speed. In an observed time series of wind speed, the gust wind speed can be indicated by a cell_methods of maximum for the time-interval. In an atmospheric model which has a parametrised calculation of gustiness, the gust wind speed may be separately diagnosed from the wind speed. + "Volume fraction" is used in the construction "volume_fraction_of_X_in_Y", where X is a material constituent of Y. It is evaluated as the volume of X divided by the volume of Y (including X). It may be expressed as a fraction, a percentage, or any other dimensionless representation of a fraction. The phrase "condensed_water" means liquid and ice. - - degree + + 1 - Zenith angle is the angle to the local vertical; a value of zero is directly overhead. + "Volume fraction" is used in the construction "volume_fraction_of_X_in_Y", where X is a material constituent of Y. It is evaluated as the volume of X divided by the volume of Y (including X). It may be expressed as a fraction, a percentage, or any other dimensionless representation of a fraction. The phrase "condensed_water" means liquid and ice. When soil moisture equals or exceeds the critical point, evapotranspiration takes place at the potential rate and is controlled by the ambient meteorological conditions (temperature, wind, relative humidity). Potential evapotranspiration is the rate at which evapotranspiration would occur under ambient conditions from a uniformly vegetated area when the water supply is not limiting. - - - isotropic_longwave_radiance_in_air - + + 1 + + + "Volume fraction" is used in the construction "volume_fraction_of_X_in_Y", where X is a material constituent of Y. It is evaluated as the volume of X divided by the volume of Y (including X). It may be expressed as a fraction, a percentage, or any other dimensionless representation of a fraction. The phrase "condensed_water" means liquid and ice. The field capacity of soil is the maximum content of water it can retain against gravitational drainage. 
+
-
- isotropic_shortwave_radiance_in_air
-
+
+ 1
+
+
+ "Volume fraction" is used in the construction "volume_fraction_of_X_in_Y", where X is a material constituent of Y. It is evaluated as the volume of X divided by the volume of Y (including X). It may be expressed as a fraction, a percentage, or any other dimensionless representation of a fraction. The phrase "condensed_water" means liquid and ice. The wilting point of soil is the water content below which plants cannot extract sufficient water to balance their loss through transpiration.
+
-
- lagrangian_tendency_of_atmosphere_sigma_coordinate
-
+
+ 1
+
+
+ "Volume fraction" is used in the construction "volume_fraction_of_X_in_Y", where X is a material constituent of Y. It is evaluated as the volume of X divided by the volume of Y (including X). It may be expressed as a fraction, a percentage, or any other dimensionless representation of a fraction. "Condensed water" means liquid and ice. The quantity with standard name volume_fraction_of_condensed_water_in_soil_pores is the ratio of the volume of condensed water in soil pores to the volume of the pores themselves.
+
-
- lagrangian_tendency_of_atmosphere_sigma_coordinate
-
+
+ 1
+
+
+ "Volume fraction" is used in the construction "volume_fraction_of_X_in_Y", where X is a material constituent of Y. It is evaluated as the volume of X divided by the volume of Y (including X). It may be expressed as a fraction, a percentage, or any other dimensionless representation of a fraction. The phrase "frozen_water" means ice.
+
-
- mole_fraction_of_ozone_in_air
-
+
+
+
+
+ "Volume fraction" is used in the construction "volume_fraction_of_X_in_Y", where X is a material constituent of Y. It is evaluated as the volume of X divided by the volume of Y (including X). It may be expressed as a fraction, a percentage, or any other dimensionless representation of a fraction. "Sea floor sediment" is sediment deposited at the sea bed. "Water" means water in all phases.
+
-
- product_of_northward_wind_and_specific_humidity
-
+
+ 1
+
+
+ "Volume fraction" is used in the construction "volume_fraction_of_X_in_Y", where X is a material constituent of Y. It is evaluated as the volume of X divided by the volume of Y (including X). It may be expressed as a fraction, a percentage, or any other dimensionless representation of a fraction.
+
-
- radiation_wavelength
-
+
+ 1
+
+
+ "Volume fraction" is used in the construction "volume_fraction_of_X_in_Y", where X is a material constituent of Y. It is evaluated as the volume of X divided by the volume of Y (including X). It may be expressed as a fraction, a percentage, or any other dimensionless representation of a fraction.
+
-
- specific_gravitational_potential_energy
-
+
+ 1
+
+
+ "Volume fraction" is used in the construction "volume_fraction_of_X_in_Y", where X is a material constituent of Y. It is evaluated as the volume of X divided by the volume of Y (including X). It may be expressed as a fraction, a percentage, or any other dimensionless representation of a fraction.
+
-
- surface_drag_coefficient_for_heat_in_air
-
+
+ 1
+
+
+ "Volume fraction" is used in the construction volume_fraction_of_X_in_Y, where X is a material constituent of Y. It is evaluated as the volume of X divided by the volume of Y (including X). It may be expressed as a fraction, a percentage, or any other dimensionless representation of a fraction. The volume_fraction_of_water_in_soil_at_saturation is the volume fraction at which a soil has reached its maximum water holding capacity.
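The volume_fraction entries above carry canonical units of 1 and, as the descriptions note, may equally be reported as a percentage. A sketch, assuming the cf-units/udunits conversion available through Iris, of moving a volume-fraction cube between the two representations; the data values are illustrative:

```python
import numpy as np
import iris

cube = iris.cube.Cube(np.array([0.12, 0.31]),
                      standard_name="volume_fraction_of_condensed_water_in_soil",
                      units="1")
cube.convert_units("percent")  # dimensionless fraction -> percentage
print(cube.data)  # [12. 31.]
```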
+
-
- surface_drag_coefficient_for_momentum_in_air
+
+ 1
+
+
+ "ratio_of_X_to_Y" means X/Y. "stp" means standard temperature (0 degC) and pressure (101325 Pa).
+
+
+
+ m-1
+
+
+ The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. Coefficients with canonical units of m2 s-1 i.e. multiplied by density have standard names with specific_ instead of volume_. A scattering_angle should not be specified with this quantity. The scattering/absorption/attenuation coefficient is assumed to be an integral over all wavelengths, unless a coordinate of radiation_wavelength is included to specify the wavelength. Radiative flux is the sum of shortwave and longwave radiative fluxes. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. To specify the relative humidity at which the sample was measured, provide a scalar coordinate variable with the standard name of "relative_humidity". The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase.
+
+
+
+ m-1
+
+
+ The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. Coefficients with canonical units of m2 s-1 i.e. multiplied by density have standard names with specific_ instead of volume_. A scattering_angle should not be specified with this quantity. The scattering/absorption/attenuation coefficient is assumed to be an integral over all wavelengths, unless a coordinate of radiation_wavelength is included to specify the wavelength. Radiative flux is the sum of shortwave and longwave radiative fluxes. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. To specify the relative humidity at which the sample was measured, provide a scalar coordinate variable with the standard name of "relative_humidity". The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Standard_temperature_and_pressure" refer to a reference volume at 273.15 K temperature and 1013.25 hPa pressure.
+
+
+
+ m-1
+
+
+ Radiative flux is the sum of shortwave and longwave radiative fluxes. Scattering of radiation is its deflection from its incident path without loss of energy. The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. Coefficients with canonical units of m2 s-1 i.e. multiplied by density have standard names with "specific_" instead of "volume_". The scattering/absorption/attenuation coefficient is assumed to be an integral over all wavelengths unless a coordinate of "radiation_wavelength" or "radiation_frequency" is included to specify the wavelength. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. "Ambient_aerosol" means that the aerosol is measured or modelled at the ambient state of pressure, temperature and relative humidity that exists in its immediate environment. "Ambient aerosol particles" are aerosol particles that have taken up ambient water through hygroscopic growth. The extent of hygroscopic growth depends on the relative humidity and the composition of the particles. To specify the relative humidity at which the quantity described by the standard name applies, provide a scalar coordinate variable with the standard name of "relative_humidity".
+
+
+
+ m-1
+
+
+ The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. Coefficients with canonical units of m2 s-1 i.e. multiplied by density have standard names with specific_ instead of volume_. A scattering_angle should not be specified with this quantity. The scattering/absorption/attenuation coefficient is assumed to be an integral over all wavelengths, unless a coordinate of radiation_wavelength is included to specify the wavelength. Radiative flux is the sum of shortwave and longwave radiative fluxes. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. "Ambient aerosol particles" are aerosol particles that have taken up ambient water through hygroscopic growth. The extent of hygroscopic growth depends on the relative humidity and the composition of the particles. To specify the relative humidity at which the sample was measured, provide a scalar coordinate variable with the standard name of "relative_humidity". The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Standard_temperature_and_pressure" refer to a reference volume at 273.15 K temperature and 1013.25 hPa pressure.
+
+
+
+ m-1
+
+
+ Radiative flux is the sum of shortwave and longwave radiative fluxes. Scattering of radiation is its deflection from its incident path without loss of energy. The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. Coefficients with canonical units of m2 s-1 i.e. multiplied by density have standard names with "specific_" instead of "volume_". The scattering/absorption/attenuation coefficient is assumed to be an integral over all wavelengths unless a coordinate of "radiation_wavelength" or "radiation_frequency" is included to specify the wavelength. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. "Dried_aerosol" means that the aerosol sample has been dried from the ambient state before sizing, but that the dry state (relative humidity less than 40 per cent) has not necessarily been reached. To specify the relative humidity at which the sample was measured, provide a scalar coordinate variable with the standard name of "relative_humidity".
+
+
+
+ m-1
+
+
+ The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. Coefficients with canonical units of m2 s-1 i.e. multiplied by density have standard names with specific_ instead of volume_. A scattering_angle should not be specified with this quantity. The scattering/absorption/attenuation coefficient is assumed to be an integral over all wavelengths, unless a coordinate of radiation_wavelength is included to specify the wavelength. Radiative flux is the sum of shortwave and longwave radiative fluxes. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. "Dried_aerosol_particles" means that the aerosol sample has been dried from the ambient state before sizing, but that the dry state (relative humidity less than 40 per cent) has not necessarily been reached. To specify the relative humidity at which the sample was measured, provide a scalar coordinate variable with the standard name of "relative_humidity". The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Standard_temperature_and_pressure" refer to a reference volume at 273.15 K temperature and 1013.25 hPa pressure.
+
+
+
+ m-1
+
+
+ The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. Coefficients with canonical units of m2 s-1 i.e. multiplied by density have standard names with specific_ instead of volume_. A scattering_angle should not be specified with this quantity. The scattering/absorption/attenuation coefficient is assumed to be an integral over all wavelengths, unless a coordinate of radiation_wavelength is included to specify the wavelength. Radiative flux is the sum of shortwave and longwave radiative fluxes. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. Aerosol particles take up ambient water (a process known as hygroscopic growth) depending on the relative humidity and the composition of the particles. "Dry aerosol particles" means aerosol particles without any water uptake. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase.
+
+
+
+ m-1
+
+
+ The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. Coefficients with canonical units of m2 s-1 i.e. multiplied by density have standard names with specific_ instead of volume_. A scattering_angle should not be specified with this quantity. The scattering/absorption/attenuation coefficient is assumed to be an integral over all wavelengths, unless a coordinate of radiation_wavelength is included to specify the wavelength. Radiative flux is the sum of shortwave and longwave radiative fluxes. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. Aerosol particles take up ambient water (a process known as hygroscopic growth) depending on the relative humidity and the composition of the particles. "Dry aerosol particles" means aerosol particles without any water uptake. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Standard_temperature_and_pressure" refer to a reference volume at 273.15 K temperature and 1013.25 hPa pressure.
+
+
+
+ m-1
+
+
+ The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. Coefficients with canonical units of m2 s-1 i.e. multiplied by density have standard names with specific_ instead of volume_. A scattering_angle should not be specified with this quantity. The scattering/absorption/attenuation coefficient is assumed to be an integral over all wavelengths, unless a coordinate of radiation_wavelength is included to specify the wavelength. Radiative flux is the sum of shortwave and longwave radiative fluxes. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. To specify the relative humidity at which the sample was measured, provide a scalar coordinate variable with the standard name of "relative_humidity". The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Pm10 aerosol particles" means atmospheric particulate compounds with an aerodynamic diameter of less than or equal to 10 micrometers.
+
+
+
+ m-1
+
+
+ The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. Coefficients with canonical units of m2 s-1 i.e. multiplied by density have standard names with specific_ instead of volume_. A scattering_angle should not be specified with this quantity. The scattering/absorption/attenuation coefficient is assumed to be an integral over all wavelengths, unless a coordinate of radiation_wavelength is included to specify the wavelength. Radiative flux is the sum of shortwave and longwave radiative fluxes. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. To specify the relative humidity at which the sample was measured, provide a scalar coordinate variable with the standard name of "relative_humidity". The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Pm10 aerosol particles" means atmospheric particulate compounds with an aerodynamic diameter of less than or equal to 10 micrometers. "Standard_temperature_and_pressure" refer to a reference volume at 273.15 K temperature and 1013.25 hPa pressure.
+
+
+
+ m-1
+
+
+ The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. Coefficients with canonical units of m2 s-1 i.e. multiplied by density have standard names with specific_ instead of volume_. A scattering_angle should not be specified with this quantity. The scattering/absorption/attenuation coefficient is assumed to be an integral over all wavelengths, unless a coordinate of radiation_wavelength is included to specify the wavelength. Radiative flux is the sum of shortwave and longwave radiative fluxes. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. "Ambient aerosol particles" are aerosol particles that have taken up ambient water through hygroscopic growth. The extent of hygroscopic growth depends on the relative humidity and the composition of the particles. To specify the relative humidity at which the sample was measured, provide a scalar coordinate variable with the standard name of "relative_humidity". The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Pm10 aerosol particles" means atmospheric particulate compounds with an aerodynamic diameter of less than or equal to 10 micrometers.
+
+
+
+ m-1
+
+
+ The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. Coefficients with canonical units of m2 s-1 i.e. multiplied by density have standard names with specific_ instead of volume_. A scattering_angle should not be specified with this quantity. The scattering/absorption/attenuation coefficient is assumed to be an integral over all wavelengths, unless a coordinate of radiation_wavelength is included to specify the wavelength. Radiative flux is the sum of shortwave and longwave radiative fluxes. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. "Ambient aerosol particles" are aerosol particles that have taken up ambient water through hygroscopic growth. The extent of hygroscopic growth depends on the relative humidity and the composition of the particles. To specify the relative humidity at which the sample was measured, provide a scalar coordinate variable with the standard name of "relative_humidity". The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Pm10 aerosol particles" means atmospheric particulate compounds with an aerodynamic diameter of less than or equal to 10 micrometers. "Standard_temperature_and_pressure" refer to a reference volume at 273.15 K temperature and 1013.25 hPa pressure.
+
+
+
+ m-1
+
+
+ The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. Coefficients with canonical units of m2 s-1 i.e. multiplied by density have standard names with specific_ instead of volume_. A scattering_angle should not be specified with this quantity. The scattering/absorption/attenuation coefficient is assumed to be an integral over all wavelengths, unless a coordinate of radiation_wavelength is included to specify the wavelength. Radiative flux is the sum of shortwave and longwave radiative fluxes. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. "Dried_aerosol_particles" means that the aerosol sample has been dried from the ambient state before sizing, but that the dry state (relative humidity less than 40 per cent) has not necessarily been reached. To specify the relative humidity at which the sample was measured, provide a scalar coordinate variable with the standard name of "relative_humidity". The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Pm10 aerosol particles" means atmospheric particulate compounds with an aerodynamic diameter of less than or equal to 10 micrometers.
+
+
+
+ m-1
+
+
+ The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. Coefficients with canonical units of m2 s-1 i.e. multiplied by density have standard names with specific_ instead of volume_. A scattering_angle should not be specified with this quantity. The scattering/absorption/attenuation coefficient is assumed to be an integral over all wavelengths, unless a coordinate of radiation_wavelength is included to specify the wavelength. Radiative flux is the sum of shortwave and longwave radiative fluxes. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. "Dried_aerosol_particles" means that the aerosol sample has been dried from the ambient state before sizing, but that the dry state (relative humidity less than 40 per cent) has not necessarily been reached. To specify the relative humidity at which the sample was measured, provide a scalar coordinate variable with the standard name of "relative_humidity". The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Pm10 aerosol particles" means atmospheric particulate compounds with an aerodynamic diameter of less than or equal to 10 micrometers. "Standard_temperature_and_pressure" refer to a reference volume at 273.15 K temperature and 1013.25 hPa pressure.
+
+
+
+ m-1
+
+
+ The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. Coefficients with canonical units of m2 s-1 i.e. multiplied by density have standard names with specific_ instead of volume_. A scattering_angle should not be specified with this quantity. The scattering/absorption/attenuation coefficient is assumed to be an integral over all wavelengths, unless a coordinate of radiation_wavelength is included to specify the wavelength. Radiative flux is the sum of shortwave and longwave radiative fluxes. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. Aerosol particles take up ambient water (a process known as hygroscopic growth) depending on the relative humidity and the composition of the particles. "Dry aerosol particles" means aerosol particles without any water uptake. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Pm10 aerosol particles" means atmospheric particulate compounds with an aerodynamic diameter of less than or equal to 10 micrometers.
+
+
+
+ m-1
+
+
+ The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. Coefficients with canonical units of m2 s-1 i.e. multiplied by density have standard names with specific_ instead of volume_. A scattering_angle should not be specified with this quantity. The scattering/absorption/attenuation coefficient is assumed to be an integral over all wavelengths, unless a coordinate of radiation_wavelength is included to specify the wavelength. Radiative flux is the sum of shortwave and longwave radiative fluxes. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. Aerosol particles take up ambient water (a process known as hygroscopic growth) depending on the relative humidity and the composition of the particles. "Dry aerosol particles" means aerosol particles without any water uptake. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Pm10 aerosol particles" means atmospheric particulate compounds with an aerodynamic diameter of less than or equal to 10 micrometers. "Standard_temperature_and_pressure" refer to a reference volume at 273.15 K temperature and 1013.25 hPa pressure.
+
+
+
+ m-1
+
+
+ The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. Coefficients with canonical units of m2 s-1 i.e. multiplied by density have standard names with specific_ instead of volume_. A scattering_angle should not be specified with this quantity. The scattering/absorption/attenuation coefficient is assumed to be an integral over all wavelengths, unless a coordinate of radiation_wavelength is included to specify the wavelength. Radiative flux is the sum of shortwave and longwave radiative fluxes. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. To specify the relative humidity at which the sample was measured, provide a scalar coordinate variable with the standard name of "relative_humidity". The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Pm1 aerosol particles" means atmospheric particulate compounds with an aerodynamic diameter of less than or equal to 1 micrometer.
"Pm1 aerosol particles" means atmospheric particulate compounds with an aerodynamic diameter of less than or equal to 1 micrometers. "Standard_temperature_and_pressure" refer to a reference volume at 273.15 K temperature and 1013.25 hPa pressure. + + + + m-1 + + + The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. Coefficients with canonical units of m2 s-1 i.e. multiplied by density have standard names with specific_ instead of volume_. A scattering_angle should not be specified with this quantity. The scattering/absorption/attenuation coefficient is assumed to be an integral over all wavelengths, unless a coordinate of radiation_wavelength is included to specify the wavelength. Radiative flux is the sum of shortwave and longwave radiative fluxes. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. "Ambient aerosol particles" are aerosol particles that have taken up ambient water through hygroscopic growth. The extent of hygroscopic growth depends on the relative humidity and the composition of the particles. To specify the relative humidity at which the sample was measured, provide a scalar coordinate variable with the standard name of "relative_humidity". The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Pm1 aerosol particles" means atmospheric particulate compounds with an aerodynamic diameter of less than or equal to 1 micrometers. + + + + m-1 + + + The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. Coefficients with canonical units of m2 s-1 i.e. multiplied by density have standard names with specific_ instead of volume_. A scattering_angle should not be specified with this quantity. The scattering/absorption/attenuation coefficient is assumed to be an integral over all wavelengths, unless a coordinate of radiation_wavelength is included to specify the wavelength. Radiative flux is the sum of shortwave and longwave radiative fluxes. Radiative flux is the sum of shortwave and longwave radiative fluxes. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. "Ambient aerosol particles" are aerosol particles that have taken up ambient water through hygroscopic growth. The extent of hygroscopic growth depends on the relative humidity and the composition of the particles. To specify the relative humidity at which the sample was measured, provide a scalar coordinate variable with the standard name of "relative_humidity". The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Pm1 aerosol particles" means atmospheric particulate compounds with an aerodynamic diameter of less than or equal to 1 micrometers. "Standard_temperature_and_pressure" refer to a reference volume at 273.15 K temperature and 1013.25 hPa pressure. + + + + m-1 + + + The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. 
Coefficients with canonical units of m2 s-1 i.e. multiplied by density have standard names with specific_ instead of volume_. A scattering_angle should not be specified with this quantity. The scattering/absorption/attenuation coefficient is assumed to be an integral over all wavelengths, unless a coordinate of radiation_wavelength is included to specify the wavelength. Radiative flux is the sum of shortwave and longwave radiative fluxes. Radiative flux is the sum of shortwave and longwave radiative fluxes. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. "Dried_aerosol_particles" means that the aerosol sample has been dried from the ambient state before sizing, but that the dry state (relative humidity less than 40 per cent) has not necessarily been reached. To specify the relative humidity at which the sample was measured, provide a scalar coordinate variable with the standard name of "relative_humidity". The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Pm1 aerosol particles" means atmospheric particulate compounds with an aerodynamic diameter of less than or equal to 1 micrometers. + + + + m-1 + + + The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. Coefficients with canonical units of m2 s-1 i.e. multiplied by density have standard names with specific_ instead of volume_. A scattering_angle should not be specified with this quantity. The scattering/absorption/attenuation coefficient is assumed to be an integral over all wavelengths, unless a coordinate of radiation_wavelength is included to specify the wavelength. Radiative flux is the sum of shortwave and longwave radiative fluxes. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. "Dried_aerosol_particles" means that the aerosol sample has been dried from the ambient state before sizing, but that the dry state (relative humidity less than 40 per cent) has not necessarily been reached. To specify the relative humidity at which the sample was measured, provide a scalar coordinate variable with the standard name of "relative_humidity". The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Pm1 aerosol particles" means atmospheric particulate compounds with an aerodynamic diameter of less than or equal to 1 micrometers. "Standard_temperature_and_pressure" refer to a reference volume at 273.15 K temperature and 1013.25 hPa pressure. + + + + m-1 + + + The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. Coefficients with canonical units of m2 s-1 i.e. multiplied by density have standard names with specific_ instead of volume_. A scattering_angle should not be specified with this quantity. The scattering/absorption/attenuation coefficient is assumed to be an integral over all wavelengths, unless a coordinate of radiation_wavelength is included to specify the wavelength. Radiative flux is the sum of shortwave and longwave radiative fluxes. 
"Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. Aerosol particles take up ambient water (a process known as hygroscopic growth) depending on the relative humidity and the composition of the particles. "Dry aerosol particles" means aerosol particles without any water uptake. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Pm1 aerosol particles" means atmospheric particulate compounds with an aerodynamic diameter of less than or equal to 1 micrometers. + + + + m-1 + + + The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. Coefficients with canonical units of m2 s-1 i.e. multiplied by density have standard names with specific_ instead of volume_. A scattering_angle should not be specified with this quantity. The scattering/absorption/attenuation coefficient is assumed to be an integral over all wavelengths, unless a coordinate of radiation_wavelength is included to specify the wavelength. Radiative flux is the sum of shortwave and longwave radiative fluxes. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. Aerosol particles take up ambient water (a process known as hygroscopic growth) depending on the relative humidity and the composition of the particles. "Dry aerosol particles" means aerosol particles without any water uptake. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Pm1 aerosol particles" means atmospheric particulate compounds with an aerodynamic diameter of less than or equal to 1 micrometers. "Standard_temperature_and_pressure" refer to a reference volume at 273.15 K temperature and 1013.25 hPa pressure. + + + + m-1 + + + The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. Coefficients with canonical units of m2 s-1 i.e. multiplied by density have standard names with specific_ instead of volume_. A scattering_angle should not be specified with this quantity. The scattering/absorption/attenuation coefficient is assumed to be an integral over all wavelengths, unless a coordinate of radiation_wavelength is included to specify the wavelength. Radiative flux is the sum of shortwave and longwave radiative fluxes. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. To specify the relative humidity at which the sample was measured, provide a scalar coordinate variable with the standard name of "relative_humidity". The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Pm2p5 aerosol particles" means atmospheric particulate compounds with an aerodynamic diameter of less than or equal to 2.5 micrometers. + + + + m-1 + + + The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. 
Coefficients with canonical units of m2 kg-1, i.e. multiplied by density, have standard names with specific_ instead of volume_. A scattering_angle should not be specified with this quantity. The scattering/absorption/attenuation coefficient is assumed to be an integral over all wavelengths, unless a coordinate of radiation_wavelength is included to specify the wavelength. Radiative flux is the sum of shortwave and longwave radiative fluxes. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. To specify the relative humidity at which the sample was measured, provide a scalar coordinate variable with the standard name of "relative_humidity". The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Pm2p5 aerosol particles" means atmospheric particulate compounds with an aerodynamic diameter of less than or equal to 2.5 micrometers. "Standard_temperature_and_pressure" refers to a reference volume at 273.15 K temperature and 1013.25 hPa pressure. + + + + m-1 + + + The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. Coefficients with canonical units of m2 kg-1, i.e. multiplied by density, have standard names with specific_ instead of volume_. A scattering_angle should not be specified with this quantity. The scattering/absorption/attenuation coefficient is assumed to be an integral over all wavelengths, unless a coordinate of radiation_wavelength is included to specify the wavelength. Radiative flux is the sum of shortwave and longwave radiative fluxes. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. "Ambient aerosol particles" are aerosol particles that have taken up ambient water through hygroscopic growth. The extent of hygroscopic growth depends on the relative humidity and the composition of the particles. To specify the relative humidity at which the sample was measured, provide a scalar coordinate variable with the standard name of "relative_humidity". The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Pm2p5 aerosol particles" means atmospheric particulate compounds with an aerodynamic diameter of less than or equal to 2.5 micrometers. + + + + m-1 + + + The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. Coefficients with canonical units of m2 kg-1, i.e. multiplied by density, have standard names with specific_ instead of volume_. A scattering_angle should not be specified with this quantity. The scattering/absorption/attenuation coefficient is assumed to be an integral over all wavelengths, unless a coordinate of radiation_wavelength is included to specify the wavelength. Radiative flux is the sum of shortwave and longwave radiative fluxes. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. "Ambient aerosol particles" are aerosol particles that have taken up ambient water through hygroscopic growth. 
The extent of hygroscopic growth depends on the relative humidity and the composition of the particles. To specify the relative humidity at which the sample was measured, provide a scalar coordinate variable with the standard name of "relative_humidity". The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Pm2p5 aerosol particles" means atmospheric particulate compounds with an aerodynamic diameter of less than or equal to 2.5 micrometers. "Standard_temperature_and_pressure" refers to a reference volume at 273.15 K temperature and 1013.25 hPa pressure. + + + + m-1 + + + The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. Coefficients with canonical units of m2 kg-1, i.e. multiplied by density, have standard names with specific_ instead of volume_. A scattering_angle should not be specified with this quantity. The scattering/absorption/attenuation coefficient is assumed to be an integral over all wavelengths, unless a coordinate of radiation_wavelength is included to specify the wavelength. Radiative flux is the sum of shortwave and longwave radiative fluxes. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. "Dried_aerosol_particles" means that the aerosol sample has been dried from the ambient state before sizing, but that the dry state (relative humidity less than 40 per cent) has not necessarily been reached. To specify the relative humidity at which the sample was measured, provide a scalar coordinate variable with the standard name of "relative_humidity". The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Pm10 aerosol particles" means atmospheric particulate compounds with an aerodynamic diameter of less than or equal to 10 micrometers. + + + + m-1 + + + The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. Coefficients with canonical units of m2 kg-1, i.e. multiplied by density, have standard names with specific_ instead of volume_. A scattering_angle should not be specified with this quantity. The scattering/absorption/attenuation coefficient is assumed to be an integral over all wavelengths, unless a coordinate of radiation_wavelength is included to specify the wavelength. Radiative flux is the sum of shortwave and longwave radiative fluxes. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. "Dried_aerosol_particles" means that the aerosol sample has been dried from the ambient state before sizing, but that the dry state (relative humidity less than 40 per cent) has not necessarily been reached. To specify the relative humidity at which the sample was measured, provide a scalar coordinate variable with the standard name of "relative_humidity". The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. 
"Pm2p5 aerosol particles" means atmospheric particulate compounds with an aerodynamic diameter of less than or equal to 2.5 micrometers. "Standard_temperature_and_pressure" refer to a reference volume at 273.15 K temperature and 1013.25 hPa pressure. + + + + m-1 + + + The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. Coefficients with canonical units of m2 s-1 i.e. multiplied by density have standard names with specific_ instead of volume_. A scattering_angle should not be specified with this quantity. The scattering/absorption/attenuation coefficient is assumed to be an integral over all wavelengths, unless a coordinate of radiation_wavelength is included to specify the wavelength. Radiative flux is the sum of shortwave and longwave radiative fluxes. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. Aerosol particles take up ambient water (a process known as hygroscopic growth) depending on the relative humidity and the composition of the particles. "Dry aerosol particles" means aerosol particles without any water uptake. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Pm2p5 aerosol particles" means atmospheric particulate compounds with an aerodynamic diameter of less than or equal to 2.5 micrometers. + + + + m-1 + + + The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. Coefficients with canonical units of m2 s-1 i.e. multiplied by density have standard names with specific_ instead of volume_. A scattering_angle should not be specified with this quantity. The scattering/absorption/attenuation coefficient is assumed to be an integral over all wavelengths, unless a coordinate of radiation_wavelength is included to specify the wavelength. Radiative flux is the sum of shortwave and longwave radiative fluxes. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. Aerosol particles take up ambient water (a process known as hygroscopic growth) depending on the relative humidity and the composition of the particles. "Dry aerosol particles" means aerosol particles without any water uptake. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Pm2p5 aerosol particles" means atmospheric particulate compounds with an aerodynamic diameter of less than or equal to 2.5 micrometers. "Standard_temperature_and_pressure" refer to a reference volume at 273.15 K temperature and 1013.25 hPa pressure. + + + + m-1 + + + Radiative flux is the sum of shortwave and longwave radiative fluxes. In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. The volume scattering/absorption/attenuation coefficient is the fractional change of radiative flux per unit path length due to the stated process. Coefficients with canonical units of m2 s-1 i.e. multiplied by density have standard names with specific_ instead of volume_. 
The scattering/absorption/attenuation coefficient is assumed to be an integral over all wavelengths, unless a coordinate of radiation_wavelength is included to specify the wavelength. Scattering of radiation is its deflection from its incident path without loss of energy. The (range of) direction(s) of scattering can be specified by a coordinate of scattering_angle. + + + + m-1 sr-1 + + + Radiative flux is the sum of shortwave and longwave radiative fluxes. Scattering of radiation is its deflection from its incident path without loss of energy. The volume scattering function is the intensity (flux per unit solid angle) of scattered radiation per unit length of scattering medium, normalised by the incident radiation flux. The (range of) direction(s) of scattering can be specified by a coordinate of scattering_angle. A coordinate variable of radiation_wavelength or radiation_frequency can be specified to indicate that the scattering applies at specific wavelengths or frequencies. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Aerosol" means the system of suspended liquid or solid particles in air (except cloud droplets) and their carrier gas, the air itself. "Ambient_aerosol" means that the aerosol is measured or modelled at the ambient state of pressure, temperature and relative humidity that exists in its immediate environment. "Ambient aerosol particles" are aerosol particles that have taken up ambient water through hygroscopic growth. The extent of hygroscopic growth depends on the relative humidity and the composition of the particles. To specify the relative humidity and temperature at which the quantity described by the standard name applies, provide scalar coordinate variables with standard names of "relative_humidity" and "air_temperature". + + + + m-1 sr-1 + + + Radiative flux is the sum of shortwave and longwave radiative fluxes. Scattering of radiation is its deflection from its incident path without loss of energy. The volume scattering function is the intensity (flux per unit solid angle) of scattered radiation per unit length of scattering medium, normalised by the incident radiation flux. The (range of) direction(s) of scattering can be specified by a coordinate of scattering_angle. A coordinate variable of radiation_wavelength or radiation_frequency can be specified to indicate that the scattering applies at specific wavelengths or frequencies. + + + + kg m-2 + 57 + + "Amount" means mass per unit area. "Water" means water in all phases. Evaporation is the conversion of liquid or solid into vapor. (The conversion of solid alone into vapor is called "sublimation".) + + + + kg m-2 + + + "Amount" means mass per unit area. "Water" means water in all phases. Evaporation is the conversion of liquid or solid into vapor. (The conversion of solid alone into vapor is called "sublimation".) "Canopy" means the vegetative covering over a surface. The canopy is often considered to be the outer surfaces of the vegetation. Plant height and the distribution, orientation and shape of plant leaves within a canopy influence the atmospheric environment and many plant processes within the canopy. Reference: AMS Glossary http://glossary.ametsoc.org/wiki/Canopy. Unless indicated in the cell_methods attribute, a quantity is assumed to apply to the whole area of each horizontal grid box. 
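The cell_methods convention just stated, that a quantity covers the whole horizontal grid box unless cell_methods narrows it, corresponds to the CF syntax "area: mean where type". A hedged Iris sketch (the field and the "mean where sea" restriction are illustrative assumptions):

```python
import numpy as np
from iris.coords import CellMethod
from iris.cube import Cube

# Hypothetical field that applies only to the sea part of each grid box;
# without this cell method it would be read as a whole-grid-box quantity.
cube = Cube(np.zeros((3, 4)), long_name="illustrative surface amount", units="kg m-2")
cube.add_cell_method(CellMethod("mean where sea", coords="area"))
# cube.cell_methods now records the restriction, in place of the
# deprecated "where_type" name qualifier discussed below.
```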
Previously, the qualifier "where_type" was used to specify that the quantity applies only to the part of the grid box of the named type. Names containing the where_type qualifier are deprecated, and newly created data should use the cell_methods attribute to indicate the horizontal area to which the quantity applies. + + + + kg m-2 s-1 + + + "Water" means water in all phases. Evaporation is the conversion of liquid or solid into vapor. (The conversion of solid alone into vapor is called "sublimation".) In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. Unless indicated in the cell_methods attribute, a quantity is assumed to apply to the whole area of each horizontal grid box. Previously, the qualifier "where_type" was used to specify that the quantity applies only to the part of the grid box of the named type. Names containing the where_type qualifier are deprecated, and newly created data should use the cell_methods attribute to indicate the horizontal area to which the quantity applies. "Canopy" means the vegetative covering over a surface. The canopy is often considered to be the outer surfaces of the vegetation. Plant height and the distribution, orientation and shape of plant leaves within a canopy influence the atmospheric environment and many plant processes within the canopy. Reference: AMS Glossary http://glossary.ametsoc.org/wiki/Canopy. + + + + kg m-2 s-1 + + + "Water" means water in all phases. Evaporation is the conversion of liquid or solid into vapor. (The conversion of solid alone into vapor is called "sublimation".) In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. + + + + kg m-2 + + + "Evapotranspiration" means all water vapor fluxes into the atmosphere from the surface: liquid evaporation, sublimation, and transpiration. "Amount" means mass per unit area. Evaporation is the conversion of liquid or solid into vapor. (The conversion of solid alone into vapor is called "sublimation".) Transpiration is the process by which liquid water in plant stomata is transferred as water vapor into the atmosphere. Unless indicated in the cell_methods attribute, a quantity is assumed to apply to the whole area of each horizontal grid box. + + + + kg m-2 s-1 + + evspsbl + Water means water in all phases. "Evapotranspiration" means all water vapor fluxes into the atmosphere from the surface: liquid evaporation, sublimation and transpiration. Evaporation is the conversion of liquid or solid into vapor. Transpiration is the process by which liquid water in plant stomata is transferred as water vapor into the atmosphere. (The conversion of solid alone into vapor is called "sublimation".) In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. Unless indicated in the cell_methods attribute, a quantity is assumed to apply to the whole area of each horizontal grid box. + + + + kg m-2 s-1 + + wfo + "Water" means water in all phases. The water flux into sea water is the freshwater entering as a result of precipitation, evaporation, river inflow, sea ice effects and water flux relaxation and correction (if applied). In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. + + + + kg m-2 s-1 + + + "Water" means water in all phases. Flux correction is also called "flux adjustment". A positive flux correction is downward, i.e. 
added to the ocean. In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. + + + + kg m-2 s-1 + + + The water flux into sea water is the freshwater entering as a result of precipitation, evaporation, river inflow, sea ice effects and water flux correction (if applied). In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Sea ice thermodynamics" refers to the addition or subtraction of mass due to surface and basal fluxes, i.e., due to melting, sublimation and fusion. "Sea ice" means all ice floating in the sea which has formed from freezing sea water, rather than by other processes such as calving of land ice to form icebergs. + + + + kg m-2 s-1 + + + The water flux into the ocean is the freshwater entering the sea water as a result of precipitation, evaporation, river inflow, sea ice effects and water flux correction (if applied). In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. The specification of a physical process by the phrase "due_to_" process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Surface drainage" refers to all melt water forming at the sea ice surface and subsequently running into the sea. + + + + kg m-2 s-1 + + + The water flux into sea water is the freshwater entering as a result of precipitation, evaporation, river inflow, sea ice effects and water flux correction (if applied). The water flux into sea water from icebergs is due to the melting of the iceberg. In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. + + + + kg m-2 s-1 + + + "Land ice" means glaciers, ice-caps and ice-sheets resting on bedrock and also includes ice-shelves. In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. The water flux into sea water from land ice is the freshwater entering the ocean as a result of runoff from the surface and base of the ice and melting from the ice shelf base and vertical ice front. For an area-average, the cell_methods attribute should be used to specify whether the average is over the area of the whole grid cell or the area of the ocean portion only. + + + + kg m-2 s-1 + + + "Water" means water in all phases. The water flux or volume transport into sea water from rivers is the inflow to the ocean, often applied to the surface in ocean models. In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. "River" refers to water in the fluvial system (stream and floodplain). + + + + kg m-2 s-1 + + + "Water" means water in all phases, including frozen i.e. ice and snow. The surface called "surface" means the lower boundary of the atmosphere. "Downward" indicates a vector component which is positive when directed downward (negative upward). The surface water flux is the result of precipitation and evaporation. 
The water flux into sea water is the freshwater entering as a result of precipitation, evaporation, river inflow, sea ice effects and water flux correction (if applied). The water flux or volume transport into sea water from rivers is the inflow to the ocean, often applied to the surface in ocean models. "River" refers to water in the fluvial system (stream and floodplain). In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. + + + + kg m-2 s-1 + + + Water means water in all phases. The water_flux_into_sea_water_without_flux_correction is the freshwater entering as a result of precipitation, evaporation, river inflow and sea ice effects. The total water flux including any flux relaxation(s) or correction(s) is described by the standard name water_flux_into_sea_water. In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. + + + + kg m-2 s-1 + + + "Water" means water in all phases. The water_flux_out_of_sea_ice_and_sea_water is the freshwater leaving the ocean as a result of precipitation, evaporation, river outflow and any water flux relaxation(s) and correction(s) that may have been applied. In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. "Sea ice" means all ice floating in the sea which has formed from freezing sea water, rather than by other processes such as calving of land ice to form icebergs. + + + + kg m-2 s-1 + + + The quantity water_flux_out_of_sea_water is the quantity with standard name water_flux_into_sea_water multiplied by -1. "Water" means water in all phases. The water flux out of sea water is the freshwater leaving as a result of precipitation, evaporation, river outflow, sea-ice and any water flux relaxation(s) and correction(s) that may have been applied. In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. + + + + kg m-2 s-1 + + + The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. The water_flux_out_of_sea_water_due_to_newtonian_relaxation is the freshwater leaving as a result of the Newtonian relaxation of the sea surface salinity. In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. + + + + kg m-2 s-1 + + + The water flux out of sea water is the freshwater leaving the sea water. In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. "Sea ice thermodynamics" refers to the addition or subtraction of sea ice mass due to surface and basal fluxes, i.e. due to melting, sublimation and fusion. + + + + kg m-2 + + + "Amount" means mass per unit area. "Water" means water in all phases. Evaporation is the conversion of liquid or solid into vapor. (The conversion of solid alone into vapor is called "sublimation".) Potential evaporation is the rate at which evaporation would take place under unaltered ambient conditions (temperature, relative humidity, wind, etc.) 
if the supply of water were unlimited, as if from an open water surface. + + + + kg m-2 s-1 + + + "Water" means water in all phases. Evaporation is the conversion of liquid or solid into vapor. (The conversion of solid alone into vapor is called "sublimation".) Potential evaporation is the rate at which evaporation would take place under unaltered ambient conditions (temperature, relative humidity, wind, etc.) if the supply of water were unlimited, as if from an open water surface. In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. + + + + kg m-2 + + + Potential evapotranspiration is the rate at which evapotranspiration would occur under ambient conditions from a uniformly vegetated area when the water supply is not limiting. "Evapotranspiration" means all water vapor fluxes into the atmosphere from the surface: liquid evaporation, sublimation and transpiration. Transpiration is the process by which liquid water in plant stomata is transferred as water vapor into the atmosphere. Evaporation is the conversion of liquid or solid into vapor. (The conversion of solid alone into vapor is called "sublimation"). Amount means mass per unit area. + + + + kg m-2 s-1 + + + "Water" means water in all phases. Sublimation is the conversion of solid into vapor. In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. + + + + m + + + 'Water surface height above reference datum' means the height of the upper surface of a body of liquid water, such as sea, lake or river, above an arbitrary reference datum. The altitude of the datum should be provided in a variable with standard name water_surface_reference_datum_altitude. The surface called "surface" means the lower boundary of the atmosphere. + + + + m + + + Altitude is the (geometric) height above the geoid, which is the reference geopotential surface. The geoid is similar to mean sea level. 'Water surface reference datum altitude' means the altitude of the arbitrary datum referred to by a quantity with standard name 'water_surface_height_above_reference_datum'. The surface called "surface" means the lower boundary of the atmosphere. + + + + m + + + Depth is the vertical distance below the surface. The water table is the surface below which the soil is saturated with water such that all pore spaces are filled. + + + + Pa + 55 + + The partial pressure of a gaseous constituent of air is the pressure that it would exert if all other gaseous constituents were removed, assuming the volume, the temperature, and its number of moles remain unchanged. + + + + Pa + 56 + + "Water vapor saturation deficit" is the difference between the saturation water vapor partial pressure and the actual water vapor partial pressure in air. + + + + m3 s-1 + + + The water flux or volume transport in rivers is the amount of water flowing in the river channel and flood plain. "Water" means water in all phases. + + + + m3 s-1 + + + "Water" means water in all phases. The water flux or volume transport into sea water from rivers is the inflow to the ocean, often applied to the surface in ocean models. "River" refers to water in the fluvial system (stream and floodplain). + + + + s-1 + + + Frequency is the number of oscillations of a wave per unit time. + + + + s-2 + + + A quantity with standard name Xward_Yward_derivative_of_geopotential is a second spatial derivative of geopotential, P, in the direction specified by X and Y, i.e., d2P/dXdY. 
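The inline notation "d2P/dXdY" used by the derivative entries here is easier to read in standard form; with P the geopotential and X, Y the two stated directions:

```latex
\mathrm{Xward\_Yward\_derivative\_of\_geopotential} = \frac{\partial^{2} P}{\partial X \, \partial Y}
```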
Geopotential is the sum of the specific gravitational potential energy relative to the geoid and the specific centripetal potential energy. "Westward" indicates a vector component which is positive when directed westward (negative eastward). "Upward" indicates a vector component which is positive when directed upward (negative downward). "component_derivative_of_X" means derivative of X with respect to distance in the component direction, which may be "northward", "southward", "eastward", "westward", "x" or "y". The last two indicate derivatives along the axes of the grid, in the case where they are not true longitude and latitude. + + + + s-2 + + + A quantity with standard name Xward_Yward_derivative_of_geopotential is a second spatial derivative of geopotential, P, in the direction specified by X and Y, i.e., d2P/dXdY. Geopotential is the sum of the specific gravitational potential energy relative to the geoid and the specific centripetal potential energy. "Westward" indicates a vector component which is positive when directed westward (negative eastward). "component_derivative_of_X" means derivative of X with respect to distance in the component direction, which may be "northward", "southward", "eastward", "westward", "x" or "y". The last two indicate derivatives along the axes of the grid, in the case where they are not true longitude and latitude. + + + + K + + + Wet bulb potential temperature is the temperature a parcel of air would have if moved dry adiabatically until it reaches saturation and thereafter moist adiabatically to sea level pressure. It is strongly recommended that a variable with this standard name should have a units_metadata attribute, with one of the values "on-scale" or "difference", whichever is appropriate for the data, because it is essential to know whether the temperature is on-scale (meaning relative to the origin of the scale indicated by the units) or refers to temperature differences (implying that the origin of the temperature scale is irrelevant), in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). + + + + K + + + It is strongly recommended that a variable with this standard name should have a units_metadata attribute, with one of the values "on-scale" or "difference", whichever is appropriate for the data, because it is essential to know whether the temperature is on-scale (meaning relative to the origin of the scale indicated by the units) or refers to temperature differences (implying that the origin of the temperature scale is irrelevant), in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). + + + + K + + + Air temperature is the bulk temperature of the air, not the surface (skin) temperature. The quantity with standard name wind_chill_of_air_temperature is the perceived air temperature when wind is factored in with the ambient air temperature (which makes it feel colder than the actual air temperature). Wind chill is based on the rate of heat loss from exposed skin caused by wind and cold. Wind chill temperature is only defined for ambient temperatures at or below 283.1 K and wind speeds above 1.34 m s-1. References: https://www.weather.gov/safety/cold-wind-chill-chart; WMO codes registry entry http://codes.wmo.int/grib2/codeflag/4.2/0-0-13. 
It is strongly recommended that a variable with this standard name should have a units_metadata attribute, with one of the values "on-scale" or "difference", whichever is appropriate for the data, because it is essential to know whether the temperature is on-scale (meaning relative to the origin of the scale indicated by the units) or refers to temperature differences (implying that the origin of the temperature scale is irrelevant), in order to convert the units correctly (cf. https://cfconventions.org/cf-conventions/cf-conventions.html#temperature-units). + + + + degree + 31 + + Wind is defined as a two-dimensional (horizontal) air velocity vector, with no vertical component. (Vertical motion in the atmosphere has the standard name upward_air_velocity.) In meteorological reports, the direction of the wind vector is usually (but not always) given as the direction from which it is blowing (wind_from_direction) (westerly, northerly, etc.). In other contexts, such as atmospheric modelling, it is often natural to give the direction in the usual manner of vectors as the heading or the direction to which it is blowing (wind_to_direction) (eastward, southward, etc.). "from_direction" is used in the construction X_from_direction and indicates the direction from which the velocity vector of X is coming. + + + + degree + + + The phrase "from_direction" is used in the construction X_from_direction and indicates the direction from which the velocity vector of X is coming. The direction is a bearing in the usual geographical sense, measured positive clockwise from due north. A gust is a sudden brief period of high wind speed. In an observed time series of wind speed, the gust wind speed can be indicated by a cell_methods of "maximum" for the time-interval. In an atmospheric model which has a parametrised calculation of gustiness, the gust wind speed may be separately diagnosed from the wind speed. Wind is defined as a two-dimensional (horizontal) air velocity vector, with no vertical component. (Vertical motion in the atmosphere has the standard name "upward_air_velocity".) In meteorological reports, the direction of the wind vector is usually (but not always) given as the direction from which it is blowing ("wind_from_direction") (westerly, northerly, etc.). In other contexts, such as atmospheric modelling, it is often natural to give the direction in the usual manner of vectors as the heading or the direction to which it is blowing ("wind_to_direction") (eastward, southward, etc.). + + + + W m-2 + 126 + + Wind is defined as a two-dimensional (horizontal) air velocity vector, with no vertical component. (Vertical motion in the atmosphere has the standard name upward_air_velocity.) In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. + + + + m s-1 + 32 + + Speed is the magnitude of velocity. Wind is defined as a two-dimensional (horizontal) air velocity vector, with no vertical component. (Vertical motion in the atmosphere has the standard name upward_air_velocity.) The wind speed is the magnitude of the wind velocity. + + + + m s-1 + + + Speed is the magnitude of velocity. Wind is defined as a two-dimensional (horizontal) air velocity vector, with no vertical component. (Vertical motion in the atmosphere has the standard name upward_air_velocity.) The wind speed is the magnitude of the wind velocity. A gust is a sudden brief period of high wind speed. 
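The temperature entries above recommend a units_metadata attribute with the value "on-scale" or "difference", so that unit conversion can tell absolute temperatures from temperature differences. A minimal sketch in Iris, storing it as an ordinary variable attribute (the data values are placeholders):

```python
import numpy as np
from iris.cube import Cube

# On-scale values are absolute temperatures, so converting units
# (e.g. K to degC) must shift the origin as well as rescale.
theta_w = Cube(
    np.array([285.0, 287.5]),
    standard_name="wet_bulb_potential_temperature",
    units="K",
)
theta_w.attributes["units_metadata"] = "on-scale"
```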
In an observed time series of wind speed, the gust wind speed can be indicated by a cell_methods of maximum for the time-interval. In an atmospheric model which has a parametrised calculation of gustiness, the gust wind speed may be separately diagnosed from the wind speed. + + + + m s-1 + + + Speed is the magnitude of velocity. Wind is defined as a two-dimensional (horizontal) air velocity vector, with no vertical component. (Vertical motion in the atmosphere has the standard name upward_air_velocity.) The wind speed is the magnitude of the wind velocity. A gust is a sudden brief period of high wind speed. In an observed time series of wind speed, the gust wind speed can be indicated by a cell_methods of maximum for the time-interval. In an atmospheric model which has a parametrised calculation of gustiness, the gust wind speed may be separately diagnosed from the wind speed. The specification of a physical process by the phrase "due_to" process means that the quantity named is a single term in a list of terms, the maximum of which composes the general quantity named by omitting the phrase. + + + + m s-1 + + + Speed is the magnitude of velocity. Wind is defined as a two-dimensional (horizontal) air velocity vector, with no vertical component. (Vertical motion in the atmosphere has the standard name upward_air_velocity.) The wind speed is the magnitude of the wind velocity. A gust is a sudden brief period of high wind speed. In an observed time series of wind speed, the gust wind speed can be indicated by a cell_methods of maximum for the time-interval. In an atmospheric model which has a parametrised calculation of gustiness, the gust wind speed may be separately diagnosed from the wind speed. The specification of a physical process by the phrase "due_to" process means that the quantity named is a single term in a list of terms, the maximum of which composes the general quantity named by omitting the phrase. + + + + s-1 + N136 + + Speed is the magnitude of velocity. Wind is defined as a two-dimensional (horizontal) air velocity vector, with no vertical component. (Vertical motion in the atmosphere has the standard name upward_air_velocity.) The wind speed is the magnitude of the wind velocity. Wind speed shear is the derivative of wind speed with respect to height. + + + + degree + + + Wind is defined as a two-dimensional (horizontal) air velocity vector, with no vertical component. (Vertical motion in the atmosphere has the standard name upward_air_velocity.) In meteorological reports, the direction of the wind vector is usually (but not always) given as the direction from which it is blowing (wind_from_direction) (westerly, northerly, etc.). In other contexts, such as atmospheric modelling, it is often natural to give the direction in the usual manner of vectors as the heading or the direction to which it is blowing (wind_to_direction) (eastward, southward, etc.). "to_direction" is used in the construction X_to_direction and indicates the direction towards which the velocity vector of X is headed. The direction is a bearing in the usual geographical sense, measured positive clockwise from due north. + + + + kg m-2 + + + "Content" indicates a quantity per unit area. "Wood debris" means dead organic matter composed of coarse wood. It is distinct from fine litter. The precise distinction between "fine" and "coarse" is model dependent. + + + + kg m-2 + + + "Content" indicates a quantity per unit area. "Wood debris" means dead organic matter composed of coarse wood. 
It is distinct from fine litter. The precise distinction between "fine" and "coarse" is model dependent. The sum of the quantities with standard names wood_debris_mass_content_of_nitrogen, surface_litter_mass_content_of_nitrogen and subsurface_litter_mass_content_of_nitrogen is the total nitrogen mass content of dead plant material. + + + + Pa m-1 + + + "component_derivative_of_X" means the derivative of X with respect to distance in the component direction, which may be northward, southward, eastward, westward, x or y. The last two indicate derivatives along the axes of the grid, whether or not they are true longitude and latitude. x_derivative_of_ocean_rigid_lid_pressure means (d/dx) of the ocean surface pressure, as derived by a rigid lid approximation, keeping the other horizontal coordinate (y, presumably) constant. + + + + W m-2 + + + "x" indicates a vector component along the grid x-axis, positive with increasing x. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. + + + + m s-1 + + + "x" indicates a vector component along the grid x-axis, positive with increasing x. Wind is defined as a two-dimensional (horizontal) air velocity vector, with no vertical component. (Vertical motion in the atmosphere has the standard name upward_air_velocity.) + + + + m s-1 + + + "x" indicates a vector component along the grid x-axis, positive with increasing x. Wind is defined as a two-dimensional (horizontal) air velocity vector, with no vertical component. (Vertical motion in the atmosphere has the standard name upward_air_velocity.) A gust is a sudden brief period of high wind speed. In an observed time series of wind speed, the gust wind speed can be indicated by a cell_methods of maximum for the time-interval. In an atmospheric model which has a parametrised calculation of gustiness, the gust wind speed may be separately diagnosed from the wind speed. + + + + Pa m-1 + + + "component_derivative_of_X" means the derivative of X with respect to distance in the component direction, which may be northward, southward, eastward, westward, x or y. The last two indicate derivatives along the axes of the grid, whether or not they are true longitude and latitude. y_derivative_of_ocean_rigid_lid_pressure means (d/dy) of the ocean surface pressure, as derived by a rigid lid approximation, keeping the other horizontal coordinate (x, presumably) constant. + + + + W m-2 + + + "y" indicates a vector component along the grid y-axis, positive with increasing y. The specification of a physical process by the phrase due_to_process means that the quantity named is a single term in a sum of terms which together compose the general quantity named by omitting the phrase. In accordance with common usage in geophysical disciplines, "flux" implies per unit area, called "flux density" in physics. + + + + m s-1 + + + "y" indicates a vector component along the grid y-axis, positive with increasing y. Wind is defined as a two-dimensional (horizontal) air velocity vector, with no vertical component. (Vertical motion in the atmosphere has the standard name upward_air_velocity.) + + + + m s-1 + + + "y" indicates a vector component along the grid y-axis, positive with increasing y. 
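The gust entries on either side of this point say an observed gust can be flagged as a time-interval maximum via cell_methods. A short sketch under that convention (the one-hour interval and the values are illustrative):

```python
import numpy as np
from iris.coords import CellMethod
from iris.cube import Cube

# Hypothetical gust observations: each value is the maximum wind speed
# seen within a one-hour interval, recorded as a "time: maximum" method.
gust = Cube(np.array([12.5, 17.0]), standard_name="wind_speed_of_gust", units="m s-1")
gust.add_cell_method(CellMethod("maximum", coords="time", intervals="1 hour"))
```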
Wind is defined as a two-dimensional (horizontal) air velocity vector, with no vertical component. (Vertical motion in the atmosphere has the standard name upward_air_velocity.) A gust is a sudden brief period of high wind speed. In an observed time series of wind speed, the gust wind speed can be indicated by a cell_methods of maximum for the time-interval. In an atmospheric model which has a parametrised calculation of gustiness, the gust wind speed may be separately diagnosed from the wind speed. + + + + degree + + + Zenith angle is the angle to the local vertical; a value of zero is directly overhead. + + + + + isotropic_longwave_radiance_in_air + + + + isotropic_shortwave_radiance_in_air + + + + mole_fraction_of_ozone_in_air + + + + product_of_northward_wind_and_specific_humidity + + + + radiation_wavelength + + + + specific_gravitational_potential_energy + + + + surface_drag_coefficient_for_heat_in_air + + + + surface_drag_coefficient_for_momentum_in_air + + + + surface_drag_coefficient_in_air + + + + sea_surface_swell_wave_period + + + + sea_surface_wind_wave_period + + + + mass_fraction_of_convective_cloud_condensed_water_in_air + + + + mass_fraction_of_ozone_in_air + + + + wave_frequency + + + + northward_eliassen_palm_flux_in_air + + + + northward_heat_flux_in_air_due_to_eddy_advection + + + + upward_eliassen_palm_flux_in_air + + + + upward_eastward_momentum_flux_in_air_due_to_nonorographic_eastward_gravity_waves + + + + upward_eastward_momentum_flux_in_air_due_to_nonorographic_westward_gravity_waves + + + + upward_eastward_momentum_flux_in_air_due_to_orographic_gravity_waves + + + + water_flux_into_sea_water + + + + wind_mixing_energy_flux_into_sea_water + + + + surface_net_downward_radiative_flux + + + + surface_upward_sensible_heat_flux + + + + atmosphere_moles_of_carbon_monoxide + + + + atmosphere_moles_of_methane + + + + atmosphere_moles_of_methyl_bromide + + + + atmosphere_moles_of_methyl_chloride + + + + atmosphere_moles_of_molecular_hydrogen + + + + atmosphere_moles_of_nitrous_oxide + + + + mass_concentration_of_suspended_matter_in_sea_water + + + + mole_concentration_of_mesozooplankton_expressed_as_nitrogen_in_sea_water + + + + mole_concentration_of_microzooplankton_expressed_as_nitrogen_in_sea_water + + + + mole_concentration_of_organic_detritus_expressed_as_nitrogen_in_sea_water + + + + mole_concentration_of_organic_detritus_expressed_as_silicon_in_sea_water + + + + tendency_of_atmosphere_moles_of_methyl_bromide + + + + tendency_of_atmosphere_moles_of_methyl_chloride + + + + tendency_of_atmosphere_moles_of_molecular_hydrogen + + + + tendency_of_atmosphere_moles_of_nitrous_oxide + + + + tendency_of_middle_atmosphere_moles_of_carbon_monoxide + + + + tendency_of_middle_atmosphere_moles_of_methane + + + + tendency_of_middle_atmosphere_moles_of_methyl_bromide + + + + tendency_of_middle_atmosphere_moles_of_methyl_chloride + + + + tendency_of_middle_atmosphere_moles_of_molecular_hydrogen + + + + tendency_of_troposphere_moles_of_carbon_monoxide + + + + tendency_of_troposphere_moles_of_methane + + + + tendency_of_troposphere_moles_of_methyl_bromide + + + + tendency_of_troposphere_moles_of_methyl_chloride + + + + tendency_of_troposphere_moles_of_molecular_hydrogen + + + + atmosphere_net_upward_convective_mass_flux + + + + eastward_water_vapor_flux_in_air + + + + kinetic_energy_dissipation_in_atmosphere_boundary_layer + + + + lwe_stratiform_snowfall_rate + + + + lwe_thickness_of_stratiform_snowfall_amount + + + + northward_water_vapor_flux_in_air + + + + 
stratiform_rainfall_amount + + + + stratiform_rainfall_flux + + + + stratiform_rainfall_rate + + + + stratiform_snowfall_amount + + + + stratiform_snowfall_flux + + + + thickness_of_stratiform_rainfall_amount + + + + thickness_of_stratiform_snowfall_amount + + + + atmosphere_mass_content_of_cloud_condensed_water + + + + atmosphere_mass_content_of_cloud_ice + + + + atmosphere_mass_content_of_convective_cloud_condensed_water + + + + atmosphere_mass_content_of_water_vapor + + + + surface_downward_mole_flux_of_carbon_dioxide + surface_upward_mole_flux_of_carbon_dioxide + + + + atmosphere_mass_content_of_sulfate + + + + atmosphere_mass_content_of_sulfate + + + + change_over_time_in_atmosphere_mass_content_of_water_due_to_advection + + + + change_over_time_in_atmosphere_mass_content_of_water_due_to_advection + + + + lwe_thickness_of_atmosphere_mass_content_of_water_vapor + + + + mass_content_of_cloud_condensed_water_in_atmosphere_layer + + + + mass_content_of_cloud_ice_in_atmosphere_layer + + + + mass_content_of_water_in_atmosphere_layer + + + + mass_content_of_water_vapor_in_atmosphere_layer + + + + tendency_of_atmosphere_mass_content_of_water_due_to_advection - - surface_drag_coefficient_in_air + + tendency_of_atmosphere_mass_content_of_water_vapor - - sea_surface_swell_wave_period + + tendency_of_atmosphere_mass_content_of_water_vapor_due_to_convection - - sea_surface_wind_wave_period + + tendency_of_atmosphere_mass_content_of_water_vapor_due_to_deep_convection - - volume_fraction_of_condensed_water_in_soil + + tendency_of_atmosphere_mass_content_of_water_vapor_due_to_shallow_convection - - volume_fraction_of_condensed_water_in_soil_at_critical_point + + tendency_of_atmosphere_mass_content_of_water_vapor_due_to_turbulence - - volume_fraction_of_condensed_water_in_soil_at_field_capacity + + tendency_of_mass_content_of_water_vapor_in_atmosphere_layer - - volume_fraction_of_condensed_water_in_soil_at_wilting_point + + tendency_of_mass_content_of_water_vapor_in_atmosphere_layer_due_to_convection - - mass_fraction_of_convective_cloud_condensed_water_in_air + + tendency_of_mass_content_of_water_vapor_in_atmosphere_layer_due_to_deep_convection - - mass_fraction_of_ozone_in_air + + tendency_of_mass_content_of_water_vapor_in_atmosphere_layer_due_to_shallow_convection - - wave_frequency + + tendency_of_mass_content_of_water_vapor_in_atmosphere_layer_due_to_turbulence - - northward_eliassen_palm_flux_in_air + + equivalent_thickness_at_stp_of_atmosphere_ozone_content - - northward_heat_flux_in_air_due_to_eddy_advection + + sea_water_x_velocity - - upward_eliassen_palm_flux_in_air + + sea_water_y_velocity - - upward_eastward_momentum_flux_in_air_due_to_nonorographic_eastward_gravity_waves + + x_wind - - upward_eastward_momentum_flux_in_air_due_to_nonorographic_westward_gravity_waves + + y_wind - - upward_eastward_momentum_flux_in_air_due_to_orographic_gravity_waves + + tendency_of_atmosphere_mass_content_of_water_vapor_due_to_advection - - water_flux_into_sea_water + + land_ice_surface_specific_mass_balance_rate - - wind_mixing_energy_flux_into_sea_water + + land_ice_lwe_surface_specific_mass_balance_rate - - mole_fraction_of_chlorine_dioxide_in_air + + isotropic_radiance_per_unit_wavelength_in_air - - mole_fraction_of_chlorine_monoxide_in_air + + isotropic_radiance_per_unit_wavelength_in_air - - mole_fraction_of_dichlorine_peroxide_in_air + + omnidirectional_spherical_irradiance_per_unit_wavelength_in_sea_water - - mole_fraction_of_hypochlorous_acid_in_air + + 
mass_concentration_of_chlorophyll_in_sea_water - - surface_net_downward_radiative_flux + + mass_concentration_of_chlorophyll_in_sea_water - - surface_snow_thickness + + atmosphere_convective_available_potential_energy - - surface_temperature + + atmosphere_convective_available_potential_energy - - surface_temperature + + gross_primary_productivity_of_biomass_expressed_as_carbon - - surface_temperature + + net_primary_productivity_of_biomass_expressed_as_carbon - - surface_upward_sensible_heat_flux + + net_primary_productivity_of_biomass_expressed_as_carbon_accumulated_in_leaves - - atmosphere_moles_of_carbon_monoxide + + net_primary_productivity_of_biomass_expressed_as_carbon_accumulated_in_roots - - atmosphere_moles_of_carbon_tetrachloride + + net_primary_productivity_of_biomass_expressed_as_carbon_accumulated_in_wood - - atmosphere_moles_of_cfc11 + + atmosphere_mass_content_of_particulate_organic_matter_dry_aerosol_particles - - atmosphere_moles_of_cfc113 + + atmosphere_mass_content_of_primary_particulate_organic_matter_dry_aerosol_particles - - atmosphere_moles_of_cfc114 + + mass_fraction_of_particulate_organic_matter_dry_aerosol_particles_in_air - - atmosphere_moles_of_cfc115 + + mass_fraction_of_primary_particulate_organic_matter_dry_aerosol_particles_in_air - - atmosphere_moles_of_cfc12 + + tendency_of_atmosphere_mass_content_of_particulate_organic_matter_dry_aerosol_particles_due_to_dry_deposition - - atmosphere_moles_of_halon1202 + + tendency_of_atmosphere_mass_content_of_particulate_organic_matter_dry_aerosol_particles_due_to_gravitational_settling - - atmosphere_moles_of_halon1211 + + tendency_of_atmosphere_mass_content_of_particulate_organic_matter_dry_aerosol_particles_due_to_turbulent_deposition - - atmosphere_moles_of_halon1301 + + tendency_of_atmosphere_mass_content_of_particulate_organic_matter_dry_aerosol_particles_due_to_wet_deposition - - atmosphere_moles_of_halon2402 + + tendency_of_atmosphere_mass_content_of_primary_particulate_organic_matter_dry_aerosol_particles_due_to_dry_deposition - - atmosphere_moles_of_hcc140a + + tendency_of_atmosphere_mass_content_of_primary_particulate_organic_matter_dry_aerosol_particles_due_to_wet_deposition - - atmosphere_moles_of_hcfc22 + + atmosphere_absorption_optical_thickness_due_to_ambient_aerosol_particles - - atmosphere_moles_of_methane + + angstrom_exponent_of_ambient_aerosol_in_air - - atmosphere_moles_of_methyl_bromide + + atmosphere_absorption_optical_thickness_due_to_dust_ambient_aerosol_particles - - atmosphere_moles_of_methyl_chloride + + atmosphere_absorption_optical_thickness_due_to_particulate_organic_matter_ambient_aerosol_particles - - atmosphere_moles_of_molecular_hydrogen + + atmosphere_absorption_optical_thickness_due_to_sulfate_ambient_aerosol_particles - - atmosphere_moles_of_nitrous_oxide + + atmosphere_mass_content_of_ammonium_dry_aerosol_particles - - mass_concentration_of_suspended_matter_in_sea_water + + atmosphere_mass_content_of_dust_dry_aerosol_particles - - mole_concentration_of_diatoms_expressed_as_nitrogen_in_sea_water + + atmosphere_mass_content_of_mercury_dry_aerosol_particles - - mole_concentration_of_mesozooplankton_expressed_as_nitrogen_in_sea_water + + atmosphere_mass_content_of_nitrate_dry_aerosol_particles - - mole_concentration_of_microzooplankton_expressed_as_nitrogen_in_sea_water + + atmosphere_mass_content_of_nitric_acid_trihydrate_ambient_aerosol_particles - - mole_concentration_of_organic_detritus_expressed_as_nitrogen_in_sea_water + + 
atmosphere_mass_content_of_secondary_particulate_organic_matter_dry_aerosol_particles - - mole_concentration_of_organic_detritus_expressed_as_silicon_in_sea_water + + atmosphere_mass_content_of_sulfate_ambient_aerosol_particles - - mole_concentration_of_phytoplankton_expressed_as_nitrogen_in_sea_water + + atmosphere_mass_content_of_sulfate_ambient_aerosol_particles - - mole_fraction_of_inorganic_bromine_in_air + + atmosphere_mass_content_of_sulfate_dry_aerosol_particles - - mole_fraction_of_noy_expressed_as_nitrogen_in_air + + atmosphere_mass_content_of_water_in_ambient_aerosol_particles - - tendency_of_atmosphere_moles_of_carbon_monoxide + + atmosphere_moles_of_nitric_acid_trihydrate_ambient_aerosol_particles - - tendency_of_atmosphere_moles_of_carbon_tetrachloride + + atmosphere_optical_thickness_due_to_ambient_aerosol_particles - - tendency_of_atmosphere_moles_of_cfc11 + + atmosphere_optical_thickness_due_to_ambient_aerosol_particles - - tendency_of_atmosphere_moles_of_cfc113 + + atmosphere_optical_thickness_due_to_dust_ambient_aerosol_particles - - tendency_of_atmosphere_moles_of_cfc114 + + atmosphere_optical_thickness_due_to_dust_dry_aerosol_particles - - tendency_of_atmosphere_moles_of_cfc115 + + atmosphere_optical_thickness_due_to_particulate_organic_matter_ambient_aerosol_particles - - tendency_of_atmosphere_moles_of_cfc12 + + mass_concentration_of_dust_dry_aerosol_particles_in_air - - tendency_of_atmosphere_moles_of_halon1202 + + mass_concentration_of_coarse_mode_ambient_aerosol_particles_in_air - - tendency_of_atmosphere_moles_of_halon1211 + + mass_concentration_of_ammonium_dry_aerosol_particles_in_air - - tendency_of_atmosphere_moles_of_halon1301 + + atmosphere_mass_content_of_sulfate_dry_aerosol_particles_expressed_as_sulfur - - tendency_of_atmosphere_moles_of_halon2402 + + atmosphere_mass_content_of_sulfate_dry_aerosol_particles_expressed_as_sulfur - - tendency_of_atmosphere_moles_of_hcc140a + + mass_concentration_of_primary_particulate_organic_matter_dry_aerosol_particles_in_air - - tendency_of_atmosphere_moles_of_hcfc22 + + mass_concentration_of_particulate_organic_matter_dry_aerosol_particles_in_air - - tendency_of_atmosphere_moles_of_methane + + atmosphere_optical_thickness_due_to_water_in_ambient_aerosol_particles - - tendency_of_atmosphere_moles_of_methyl_bromide + + mass_concentration_of_mercury_dry_aerosol_particles_in_air - - tendency_of_atmosphere_moles_of_methyl_chloride + + mass_concentration_of_nitrate_dry_aerosol_particles_in_air - - tendency_of_atmosphere_moles_of_molecular_hydrogen + + mass_concentration_of_nitric_acid_trihydrate_ambient_aerosol_particles_in_air - - tendency_of_atmosphere_moles_of_nitrous_oxide + + mass_concentration_of_secondary_particulate_organic_matter_dry_aerosol_particles_in_air - - tendency_of_middle_atmosphere_moles_of_carbon_monoxide + + mass_concentration_of_sulfate_ambient_aerosol_particles_in_air - - tendency_of_middle_atmosphere_moles_of_hcc140a + + mass_concentration_of_sulfate_ambient_aerosol_particles_in_air - - tendency_of_middle_atmosphere_moles_of_methane + + mass_concentration_of_sulfate_dry_aerosol_particles_in_air - - tendency_of_middle_atmosphere_moles_of_methyl_bromide + + mass_concentration_of_water_in_ambient_aerosol_particles_in_air - - tendency_of_middle_atmosphere_moles_of_methyl_chloride + + mass_fraction_of_ammonium_dry_aerosol_particles_in_air - - tendency_of_middle_atmosphere_moles_of_molecular_hydrogen + + mass_fraction_of_dust_dry_aerosol_particles_in_air - - tendency_of_troposphere_moles_of_carbon_monoxide + 
+ mass_fraction_of_nitrate_dry_aerosol_particles_in_air - - tendency_of_troposphere_moles_of_hcc140a + + mass_fraction_of_nitric_acid_trihydrate_ambient_aerosol_particles_in_air - - tendency_of_troposphere_moles_of_hcfc22 + + mass_fraction_of_secondary_particulate_organic_matter_dry_aerosol_particles_in_air - - tendency_of_troposphere_moles_of_methane + + mass_fraction_of_sulfate_dry_aerosol_particles_in_air - - tendency_of_troposphere_moles_of_methyl_bromide + + mass_fraction_of_water_in_ambient_aerosol_particles_in_air - - tendency_of_troposphere_moles_of_methyl_chloride + + mole_concentration_of_nitric_acid_trihydrate_ambient_aerosol_particles_in_air - - tendency_of_troposphere_moles_of_molecular_hydrogen + + mole_fraction_of_nitric_acid_trihydrate_ambient_aerosol_particles_in_air - - atmosphere_net_upward_convective_mass_flux + + number_concentration_of_ambient_aerosol_particles_in_air - - moles_of_cfc11_per_unit_mass_in_sea_water + + number_concentration_of_coarse_mode_ambient_aerosol_particles_in_air - - tendency_of_mass_fraction_of_stratiform_cloud_ice_in_air_due_to_melting_to_cloud_liquid_water + + number_concentration_of_nucleation_mode_ambient_aerosol_particles_in_air - - tendency_of_mass_fraction_of_stratiform_cloud_ice_in_air_due_to_riming_from_cloud_liquid_water + + optical_thickness_of_atmosphere_layer_due_to_ambient_aerosol_particles - - tendency_of_mass_fraction_of_stratiform_cloud_ice_in_air_due_to_heterogeneous_nucleation_from_cloud_liquid_water + + optical_thickness_of_atmosphere_layer_due_to_ambient_aerosol_particles - - eastward_water_vapor_flux_in_air + + tendency_of_atmosphere_mass_content_of_ammonium_dry_aerosol_particles_due_to_dry_deposition - - kinetic_energy_dissipation_in_atmosphere_boundary_layer + + tendency_of_atmosphere_mass_content_of_ammonium_dry_aerosol_particles_due_to_wet_deposition - - liquid_water_content_of_surface_snow + + tendency_of_atmosphere_mass_content_of_dust_dry_aerosol_particles_due_to_dry_deposition - - lwe_stratiform_snowfall_rate + + tendency_of_atmosphere_mass_content_of_dust_dry_aerosol_particles_due_to_gravitational_settling - - lwe_thickness_of_stratiform_snowfall_amount + + tendency_of_atmosphere_mass_content_of_dust_dry_aerosol_particles_due_to_turbulent_deposition - - northward_water_vapor_flux_in_air + + tendency_of_atmosphere_mass_content_of_dust_dry_aerosol_particles_due_to_wet_deposition - - soot_content_of_surface_snow + + tendency_of_atmosphere_mass_content_of_mercury_dry_aerosol_particles_due_to_dry_deposition - - stratiform_cloud_area_fraction + + tendency_of_atmosphere_mass_content_of_mercury_dry_aerosol_particles_due_to_wet_deposition - - stratiform_rainfall_amount + + tendency_of_atmosphere_mass_content_of_nitrate_dry_aerosol_particles_due_to_dry_deposition - - stratiform_rainfall_flux + + tendency_of_atmosphere_mass_content_of_secondary_particulate_organic_matter_dry_aerosol_particles_due_to_dry_deposition - - stratiform_rainfall_rate + + tendency_of_atmosphere_mass_content_of_secondary_particulate_organic_matter_dry_aerosol_particles_due_to_net_chemical_production - - stratiform_snowfall_amount + + tendency_of_atmosphere_mass_content_of_secondary_particulate_organic_matter_dry_aerosol_particles_due_to_net_chemical_production - - stratiform_snowfall_flux + + tendency_of_atmosphere_mass_content_of_secondary_particulate_organic_matter_dry_aerosol_particles_due_to_wet_deposition - - temperature_in_surface_snow + + tendency_of_atmosphere_mass_content_of_sulfate_dry_aerosol_particles_due_to_dry_deposition - - 
thermal_energy_content_of_surface_snow + + tendency_of_atmosphere_mass_content_of_sulfate_dry_aerosol_particles_expressed_as_sulfur_due_to_dry_deposition - - thickness_of_stratiform_rainfall_amount + + tendency_of_atmosphere_mass_content_of_sulfate_dry_aerosol_particles_expressed_as_sulfur_due_to_dry_deposition - - thickness_of_stratiform_snowfall_amount + + tendency_of_atmosphere_mass_content_of_sulfate_dry_aerosol_particles_expressed_as_sulfur_due_to_gravitational_settling - - water_vapor_partial_pressure_in_air + + tendency_of_atmosphere_mass_content_of_sulfate_dry_aerosol_particles_expressed_as_sulfur_due_to_gravitational_settling - - water_vapor_saturation_deficit_in_air + + tendency_of_atmosphere_mass_content_of_sulfate_dry_aerosol_particles_expressed_as_sulfur_due_to_turbulent_deposition - - atmosphere_mass_content_of_cloud_condensed_water + + tendency_of_atmosphere_mass_content_of_sulfate_dry_aerosol_particles_expressed_as_sulfur_due_to_turbulent_deposition - - atmosphere_mass_content_of_cloud_ice + + tendency_of_atmosphere_moles_of_nitric_acid_trihydrate_ambient_aerosol_particles - - atmosphere_mass_content_of_cloud_liquid_water + + tendency_of_atmosphere_moles_of_sulfate_dry_aerosol_particles - - atmosphere_mass_content_of_convective_cloud_condensed_water + + tendency_of_atmosphere_mass_content_of_sulfate_dry_aerosol_particles_due_to_emission - - atmosphere_mass_content_of_convective_cloud_liquid_water + + tendency_of_atmosphere_mass_content_of_dust_dry_aerosol_particles_due_to_emission - - atmosphere_mass_content_of_water_vapor + + tendency_of_atmosphere_mass_content_of_particulate_organic_matter_dry_aerosol_particles_expressed_as_carbon_due_to_emission_from_residential_and_commercial_combustion - - surface_downward_mole_flux_of_carbon_dioxide + + tendency_of_atmosphere_mass_content_of_particulate_organic_matter_dry_aerosol_particles_expressed_as_carbon_due_to_emission_from_waste_treatment_and_disposal - - surface_upward_mole_flux_of_carbon_dioxide + + tendency_of_atmosphere_mass_content_of_particulate_organic_matter_dry_aerosol_particles_expressed_as_carbon_due_to_emission_from_savanna_and_grassland_fires - - atmosphere_mass_content_of_sulfate + + tendency_of_atmosphere_mass_content_of_primary_particulate_organic_matter_dry_aerosol_particles_due_to_emission - - atmosphere_mass_content_of_sulfate + + tendency_of_atmosphere_mass_content_of_particulate_organic_matter_dry_aerosol_particles_expressed_as_carbon_due_to_emission_from_land_transport - - change_over_time_in_atmosphere_mass_content_of_water_due_to_advection + + tendency_of_atmosphere_mass_content_of_particulate_organic_matter_dry_aerosol_particles_expressed_as_carbon_due_to_emission_from_agricultural_waste_burning - - change_over_time_in_atmosphere_mass_content_of_water_due_to_advection + + tendency_of_atmosphere_mass_content_of_particulate_organic_matter_dry_aerosol_particles_expressed_as_carbon_due_to_emission_from_energy_production_and_distribution - - lwe_thickness_of_atmosphere_mass_content_of_water_vapor + + tendency_of_atmosphere_mass_content_of_particulate_organic_matter_dry_aerosol_particles_expressed_as_carbon_due_to_emission_from_maritime_transport - - mass_content_of_cloud_condensed_water_in_atmosphere_layer + + tendency_of_atmosphere_mass_content_of_particulate_organic_matter_dry_aerosol_particles_due_to_net_chemical_production_and_emission - - mass_content_of_cloud_ice_in_atmosphere_layer + + 
tendency_of_atmosphere_mass_content_of_particulate_organic_matter_dry_aerosol_particles_due_to_net_chemical_production_and_emission - - mass_content_of_cloud_liquid_water_in_atmosphere_layer + + tendency_of_atmosphere_mass_content_of_particulate_organic_matter_dry_aerosol_particles_expressed_as_carbon_due_to_emission_from_forest_fires - - mass_content_of_water_in_atmosphere_layer + + tendency_of_atmosphere_mass_content_of_particulate_organic_matter_dry_aerosol_particles_expressed_as_carbon_due_to_emission_from_industrial_processes_and_combustion - - mass_content_of_water_vapor_in_atmosphere_layer + + sea_surface_swell_wave_significant_height - - tendency_of_atmosphere_mass_content_of_water_due_to_advection + + sea_surface_wind_wave_significant_height - - tendency_of_atmosphere_mass_content_of_water_vapor + + sea_surface_wave_significant_height - - tendency_of_atmosphere_mass_content_of_water_vapor_due_to_convection + + mass_content_of_water_in_soil_layer - - tendency_of_atmosphere_mass_content_of_water_vapor_due_to_deep_convection + + mass_content_of_water_in_soil - - tendency_of_atmosphere_mass_content_of_water_vapor_due_to_shallow_convection + + sea_surface_swell_wave_to_direction - - tendency_of_atmosphere_mass_content_of_water_vapor_due_to_turbulence + + sea_surface_wind_wave_to_direction - - tendency_of_mass_content_of_water_vapor_in_atmosphere_layer + + sea_surface_wave_mean_period - - tendency_of_mass_content_of_water_vapor_in_atmosphere_layer_due_to_convection + + sea_surface_wind_wave_mean_period - - tendency_of_mass_content_of_water_vapor_in_atmosphere_layer_due_to_deep_convection + + sea_surface_swell_wave_mean_period - - tendency_of_mass_content_of_water_vapor_in_atmosphere_layer_due_to_shallow_convection + + ocean_mixed_layer_thickness_defined_by_vertical_tracer_diffusivity_deficit - - tendency_of_mass_content_of_water_vapor_in_atmosphere_layer_due_to_turbulence + + atmosphere_mass_content_of_sea_salt_dry_aerosol_particles - - equivalent_thickness_at_stp_of_atmosphere_ozone_content + + atmosphere_mass_content_of_sea_salt_dry_aerosol_particles - - sea_water_x_velocity + + atmosphere_optical_thickness_due_to_sea_salt_ambient_aerosol_particles - - sea_water_y_velocity + + atmosphere_optical_thickness_due_to_sea_salt_ambient_aerosol_particles - - x_wind + + mass_concentration_of_sea_salt_dry_aerosol_particles_in_air - - y_wind + + mass_concentration_of_sea_salt_dry_aerosol_particles_in_air - - tendency_of_atmosphere_mass_content_of_water_vapor_due_to_advection + + mass_fraction_of_sea_salt_dry_aerosol_particles_in_air - - land_ice_surface_specific_mass_balance_rate + + mass_fraction_of_sea_salt_dry_aerosol_particles_in_air - - land_ice_lwe_surface_specific_mass_balance_rate + + tendency_of_atmosphere_mass_content_of_pm10_sea_salt_dry_aerosol_particles_due_to_dry_deposition - - isotropic_radiance_per_unit_wavelength_in_air + + tendency_of_atmosphere_mass_content_of_pm10_sea_salt_dry_aerosol_particles_due_to_emission - - isotropic_radiance_per_unit_wavelength_in_air + + tendency_of_atmosphere_mass_content_of_pm10_sea_salt_dry_aerosol_particles_due_to_wet_deposition - - omnidirectional_spherical_irradiance_per_unit_wavelength_in_sea_water + + tendency_of_atmosphere_mass_content_of_pm2p5_sea_salt_dry_aerosol_particles_due_to_wet_deposition - - mass_concentration_of_chlorophyll_in_sea_water + + tendency_of_atmosphere_mass_content_of_sea_salt_dry_aerosol_particles_due_to_dry_deposition - - mass_concentration_of_chlorophyll_in_sea_water + + 
tendency_of_atmosphere_mass_content_of_sea_salt_dry_aerosol_particles_due_to_dry_deposition - - atmosphere_convective_available_potential_energy + + tendency_of_atmosphere_mass_content_of_sea_salt_dry_aerosol_particles_due_to_gravitational_settling - - atmosphere_convective_available_potential_energy + + tendency_of_atmosphere_mass_content_of_sea_salt_dry_aerosol_particles_due_to_gravitational_settling - - net_primary_mole_productivity_of_biomass_expressed_as_carbon_by_diatoms + + tendency_of_atmosphere_mass_content_of_sea_salt_dry_aerosol_particles_due_to_turbulent_deposition - - gross_primary_productivity_of_biomass_expressed_as_carbon + + tendency_of_atmosphere_mass_content_of_sea_salt_dry_aerosol_particles_due_to_turbulent_deposition - - net_primary_mole_productivity_of_biomass_expressed_as_carbon_by_calcareous_phytoplankton + + tendency_of_atmosphere_mass_content_of_sea_salt_dry_aerosol_particles_due_to_wet_deposition - - net_primary_mole_productivity_of_biomass_expressed_as_carbon_by_miscellaneous_phytoplankton + + tendency_of_atmosphere_mass_content_of_sea_salt_dry_aerosol_particles_due_to_wet_deposition - - net_primary_mole_productivity_of_biomass_expressed_as_carbon_by_diazotrophs + + atmosphere_optical_thickness_due_to_pm1_ambient_aerosol_particles - - net_primary_mole_productivity_of_biomass_expressed_as_carbon_by_phytoplankton + + mass_concentration_of_pm1_ambient_aerosol_particles_in_air - - net_primary_mole_productivity_of_biomass_expressed_as_carbon_due_to_nitrate_utilization + + mass_fraction_of_pm1_ambient_aerosol_particles_in_air - - net_primary_productivity_of_biomass_expressed_as_carbon + + mass_fraction_of_pm1_ambient_aerosol_particles_in_air - - net_primary_productivity_of_biomass_expressed_as_carbon_accumulated_in_leaves + + atmosphere_optical_thickness_due_to_pm2p5_ambient_aerosol_particles - - net_primary_productivity_of_biomass_expressed_as_carbon_accumulated_in_roots + + mass_concentration_of_pm2p5_ambient_aerosol_particles_in_air - - net_primary_productivity_of_biomass_expressed_as_carbon_accumulated_in_wood + + mass_fraction_of_pm2p5_ambient_aerosol_particles_in_air - - net_primary_mole_productivity_of_biomass_expressed_as_carbon_by_picophytoplankton + + mass_fraction_of_pm2p5_ambient_aerosol_particles_in_air - - atmosphere_mass_content_of_particulate_organic_matter_dry_aerosol_particles + + atmosphere_optical_thickness_due_to_pm10_ambient_aerosol_particles - - atmosphere_mass_content_of_primary_particulate_organic_matter_dry_aerosol_particles + + mass_concentration_of_pm10_ambient_aerosol_particles_in_air - - mass_fraction_of_particulate_organic_matter_dry_aerosol_particles_in_air + + mass_fraction_of_pm10_ambient_aerosol_particles_in_air - - mass_fraction_of_primary_particulate_organic_matter_dry_aerosol_particles_in_air + + mass_fraction_of_pm10_ambient_aerosol_particles_in_air - - tendency_of_atmosphere_mass_content_of_particulate_organic_matter_dry_aerosol_particles_due_to_dry_deposition + + tendency_of_atmosphere_mass_content_of_pm2p5_sea_salt_dry_aerosol_particles_due_to_dry_deposition - - tendency_of_atmosphere_mass_content_of_particulate_organic_matter_dry_aerosol_particles_due_to_gravitational_settling + + tendency_of_atmosphere_mass_content_of_pm2p5_sea_salt_dry_aerosol_particles_due_to_emission - - tendency_of_atmosphere_mass_content_of_particulate_organic_matter_dry_aerosol_particles_due_to_turbulent_deposition + + sea_floor_depth_below_mean_sea_level - - 
tendency_of_atmosphere_mass_content_of_particulate_organic_matter_dry_aerosol_particles_due_to_wet_deposition + + sea_surface_height_above_mean_sea_level - - tendency_of_atmosphere_mass_content_of_primary_particulate_organic_matter_dry_aerosol_particles_due_to_dry_deposition + + sea_surface_height_above_mean_sea_level - - tendency_of_atmosphere_mass_content_of_primary_particulate_organic_matter_dry_aerosol_particles_due_to_wet_deposition + + surface_geostrophic_eastward_sea_water_velocity_assuming_mean_sea_level_for_geoid - - atmosphere_absorption_optical_thickness_due_to_ambient_aerosol_particles + + surface_geostrophic_eastward_sea_water_velocity_assuming_mean_sea_level_for_geoid - - angstrom_exponent_of_ambient_aerosol_in_air + + surface_geostrophic_northward_sea_water_velocity_assuming_mean_sea_level_for_geoid - - atmosphere_absorption_optical_thickness_due_to_dust_ambient_aerosol_particles + + surface_geostrophic_northward_sea_water_velocity_assuming_mean_sea_level_for_geoid - - atmosphere_absorption_optical_thickness_due_to_particulate_organic_matter_ambient_aerosol_particles + + surface_geostrophic_sea_water_x_velocity_assuming_mean_sea_level_for_geoid - - atmosphere_absorption_optical_thickness_due_to_sulfate_ambient_aerosol_particles + + surface_geostrophic_sea_water_y_velocity_assuming_mean_sea_level_for_geoid - - atmosphere_mass_content_of_ammonium_dry_aerosol_particles + + tendency_of_sea_surface_height_above_mean_sea_level - - atmosphere_mass_content_of_dust_dry_aerosol_particles + + surface_geostrophic_northward_sea_water_velocity - - atmosphere_mass_content_of_mercury_dry_aerosol_particles + + surface_geostrophic_eastward_sea_water_velocity - - atmosphere_mass_content_of_nitrate_dry_aerosol_particles + + tendency_of_atmosphere_mass_content_of_nitrogen_compounds_expressed_as_nitrogen_due_to_dry_deposition - - atmosphere_mass_content_of_nitric_acid_trihydrate_ambient_aerosol_particles + + tendency_of_atmosphere_mass_content_of_nitrogen_compounds_expressed_as_nitrogen_due_to_deposition - - atmosphere_mass_content_of_secondary_particulate_organic_matter_dry_aerosol_particles + + atmosphere_absorption_optical_thickness_due_to_sea_salt_ambient_aerosol_particles - - atmosphere_mass_content_of_sulfate_ambient_aerosol_particles + + atmosphere_absorption_optical_thickness_due_to_sea_salt_ambient_aerosol_particles - - atmosphere_mass_content_of_sulfate_ambient_aerosol_particles + + tendency_of_atmosphere_mass_content_of_sea_salt_dry_aerosol_particles_due_to_emission - - atmosphere_mass_content_of_sulfate_dry_aerosol_particles + + tendency_of_atmosphere_mass_content_of_sea_salt_dry_aerosol_particles_due_to_emission - - atmosphere_mass_content_of_water_in_ambient_aerosol_particles + + sea_surface_height_above_geoid - - atmosphere_moles_of_nitric_acid_trihydrate_ambient_aerosol_particles + + sea_surface_height_above_geoid - - atmosphere_optical_thickness_due_to_ambient_aerosol_particles + + sea_floor_depth_below_geoid - - atmosphere_optical_thickness_due_to_ambient_aerosol_particles + + air_pressure_at_mean_sea_level + + + + lagrangian_tendency_of_air_pressure - - atmosphere_optical_thickness_due_to_dust_ambient_aerosol_particles + + lagrangian_tendency_of_air_pressure - - atmosphere_optical_thickness_due_to_dust_dry_aerosol_particles + + mass_concentration_of_elemental_carbon_dry_aerosol_particles_in_air - - atmosphere_optical_thickness_due_to_particulate_organic_matter_ambient_aerosol_particles + + atmosphere_mass_content_of_elemental_carbon_dry_aerosol_particles - - 
mass_concentration_of_dust_dry_aerosol_particles_in_air + + mass_fraction_of_elemental_carbon_dry_aerosol_particles_in_air - - mass_concentration_of_coarse_mode_ambient_aerosol_particles_in_air + + tendency_of_atmosphere_mass_content_of_elemental_carbon_dry_aerosol_particles_due_to_dry_deposition - - mass_concentration_of_ammonium_dry_aerosol_particles_in_air + + tendency_of_atmosphere_mass_content_of_elemental_carbon_dry_aerosol_particles_due_to_emission - - atmosphere_mass_content_of_sulfate_dry_aerosol_particles_expressed_as_sulfur + + tendency_of_atmosphere_mass_content_of_elemental_carbon_dry_aerosol_particles_due_to_emission_from_energy_production_and_distribution - - atmosphere_mass_content_of_sulfate_dry_aerosol_particles_expressed_as_sulfur + + tendency_of_atmosphere_mass_content_of_elemental_carbon_dry_aerosol_particles_due_to_emission_from_forest_fires - - mass_concentration_of_primary_particulate_organic_matter_dry_aerosol_particles_in_air + + tendency_of_atmosphere_mass_content_of_elemental_carbon_dry_aerosol_particles_due_to_emission_from_industrial_processes_and_combustion - - mass_concentration_of_particulate_organic_matter_dry_aerosol_particles_in_air + + tendency_of_atmosphere_mass_content_of_elemental_carbon_dry_aerosol_particles_due_to_emission_from_land_transport - - atmosphere_optical_thickness_due_to_water_in_ambient_aerosol_particles + + tendency_of_atmosphere_mass_content_of_elemental_carbon_dry_aerosol_particles_due_to_emission_from_maritime_transport - - mass_concentration_of_mercury_dry_aerosol_particles_in_air + + tendency_of_atmosphere_mass_content_of_elemental_carbon_dry_aerosol_particles_due_to_emission_from_residential_and_commercial_combustion - - mass_concentration_of_nitrate_dry_aerosol_particles_in_air + + tendency_of_atmosphere_mass_content_of_elemental_carbon_dry_aerosol_particles_due_to_emission_from_savanna_and_grassland_fires - - mass_concentration_of_nitric_acid_trihydrate_ambient_aerosol_particles_in_air + + tendency_of_atmosphere_mass_content_of_elemental_carbon_dry_aerosol_particles_due_to_emission_from_waste_treatment_and_disposal - - mass_concentration_of_secondary_particulate_organic_matter_dry_aerosol_particles_in_air + + tendency_of_atmosphere_mass_content_of_elemental_carbon_dry_aerosol_particles_due_to_gravitational_settling - - mass_concentration_of_sulfate_ambient_aerosol_particles_in_air + + tendency_of_atmosphere_mass_content_of_elemental_carbon_dry_aerosol_particles_due_to_turbulent_deposition - - mass_concentration_of_sulfate_ambient_aerosol_particles_in_air + + tendency_of_atmosphere_mass_content_of_elemental_carbon_dry_aerosol_particles_due_to_wet_deposition - - mass_concentration_of_sulfate_dry_aerosol_particles_in_air + + tendency_of_mass_concentration_of_elemental_carbon_dry_aerosol_particles_in_air_due_to_emission_from_aviation - - mass_concentration_of_water_in_ambient_aerosol_particles_in_air + + integral_wrt_time_of_surface_downward_latent_heat_flux - - mass_fraction_of_ammonium_dry_aerosol_particles_in_air + + integral_wrt_time_of_surface_downward_sensible_heat_flux - - mass_fraction_of_dust_dry_aerosol_particles_in_air + + integral_wrt_time_of_surface_net_downward_longwave_flux - - mass_fraction_of_nitrate_dry_aerosol_particles_in_air + + integral_wrt_time_of_surface_net_downward_shortwave_flux - - mass_fraction_of_nitric_acid_trihydrate_ambient_aerosol_particles_in_air + + integral_wrt_time_of_toa_net_downward_shortwave_flux - - mass_fraction_of_secondary_particulate_organic_matter_dry_aerosol_particles_in_air + + 
integral_wrt_time_of_toa_outgoing_longwave_flux - - mass_fraction_of_sulfate_dry_aerosol_particles_in_air + + northward_ocean_freshwater_transport_due_to_parameterized_eddy_advection - - mass_fraction_of_water_in_ambient_aerosol_particles_in_air + + northward_ocean_salt_transport_due_to_parameterized_eddy_advection - - mole_concentration_of_nitric_acid_trihydrate_ambient_aerosol_particles_in_air + + ocean_heat_x_transport_due_to_parameterized_eddy_advection - - mole_fraction_of_nitric_acid_trihydrate_ambient_aerosol_particles_in_air + + ocean_heat_y_transport_due_to_parameterized_eddy_advection - - number_concentration_of_ambient_aerosol_particles_in_air + + ocean_mass_x_transport_due_to_advection_and_parameterized_eddy_advection - - number_concentration_of_coarse_mode_ambient_aerosol_particles_in_air + + ocean_mass_y_transport_due_to_advection_and_parameterized_eddy_advection - - number_concentration_of_nucleation_mode_ambient_aerosol_particles_in_air + + ocean_meridional_overturning_mass_streamfunction_due_to_parameterized_eddy_advection - - optical_thickness_of_atmosphere_layer_due_to_ambient_aerosol_particles + + ocean_y_overturning_mass_streamfunction_due_to_parameterized_eddy_advection - - optical_thickness_of_atmosphere_layer_due_to_ambient_aerosol_particles + + tendency_of_sea_water_salinity_due_to_parameterized_eddy_advection - - tendency_of_atmosphere_mass_content_of_ammonium_dry_aerosol_particles_due_to_dry_deposition + + northward_sea_water_velocity_due_to_parameterized_mesoscale_eddies - - tendency_of_atmosphere_mass_content_of_ammonium_dry_aerosol_particles_due_to_wet_deposition + + eastward_sea_water_velocity_due_to_parameterized_mesoscale_eddies - - tendency_of_atmosphere_mass_content_of_dust_dry_aerosol_particles_due_to_dry_deposition + + sea_water_x_velocity_due_to_parameterized_mesoscale_eddies - - tendency_of_atmosphere_mass_content_of_dust_dry_aerosol_particles_due_to_gravitational_settling + + sea_water_y_velocity_due_to_parameterized_mesoscale_eddies - - tendency_of_atmosphere_mass_content_of_dust_dry_aerosol_particles_due_to_turbulent_deposition + + upward_sea_water_velocity_due_to_parameterized_mesoscale_eddies - - tendency_of_atmosphere_mass_content_of_dust_dry_aerosol_particles_due_to_wet_deposition + + ocean_tracer_biharmonic_diffusivity_due_to_parameterized_mesoscale_eddy_advection - - tendency_of_atmosphere_mass_content_of_mercury_dry_aerosol_particles_due_to_dry_deposition + + ocean_tracer_laplacian_diffusivity_due_to_parameterized_mesoscale_eddy_advection - - tendency_of_atmosphere_mass_content_of_mercury_dry_aerosol_particles_due_to_wet_deposition + + tendency_of_ocean_eddy_kinetic_energy_content_due_to_parameterized_eddy_advection - - tendency_of_atmosphere_mass_content_of_nitrate_dry_aerosol_particles_due_to_dry_deposition + + northward_ocean_heat_transport_due_to_parameterized_eddy_advection - - tendency_of_atmosphere_mass_content_of_secondary_particulate_organic_matter_dry_aerosol_particles_due_to_dry_deposition + + mole_concentration_of_dissolved_inorganic_13C_in_sea_water - - tendency_of_atmosphere_mass_content_of_secondary_particulate_organic_matter_dry_aerosol_particles_due_to_net_chemical_production + + surface_downward_mass_flux_of_13C_dioxide_abiotic_analogue_expressed_as_13C - - tendency_of_atmosphere_mass_content_of_secondary_particulate_organic_matter_dry_aerosol_particles_due_to_net_chemical_production + + surface_downward_mass_flux_of_14C_dioxide_abiotic_analogue_expressed_as_carbon - - 
tendency_of_atmosphere_mass_content_of_secondary_particulate_organic_matter_dry_aerosol_particles_due_to_wet_deposition + + mole_concentration_of_dissolved_inorganic_14C_in_sea_water - - tendency_of_atmosphere_mass_content_of_sulfate_dry_aerosol_particles_due_to_dry_deposition + + stem_mass_content_of_carbon - - tendency_of_atmosphere_mass_content_of_sulfate_dry_aerosol_particles_expressed_as_sulfur_due_to_dry_deposition + + subsurface_litter_mass_content_of_carbon - - tendency_of_atmosphere_mass_content_of_sulfate_dry_aerosol_particles_expressed_as_sulfur_due_to_dry_deposition + + mass_flux_of_carbon_into_litter_from_vegetation - - tendency_of_atmosphere_mass_content_of_sulfate_dry_aerosol_particles_expressed_as_sulfur_due_to_gravitational_settling + + litter_mass_content_of_carbon - - tendency_of_atmosphere_mass_content_of_sulfate_dry_aerosol_particles_expressed_as_sulfur_due_to_gravitational_settling + + surface_litter_mass_content_of_carbon - - tendency_of_atmosphere_mass_content_of_sulfate_dry_aerosol_particles_expressed_as_sulfur_due_to_turbulent_deposition + + eastward_transformed_eulerian_mean_air_velocity - - tendency_of_atmosphere_mass_content_of_sulfate_dry_aerosol_particles_expressed_as_sulfur_due_to_turbulent_deposition + + northward_transformed_eulerian_mean_air_velocity - - tendency_of_atmosphere_mass_content_of_sulfate_dry_aerosol_expressed_as_sulfur_due_to_wet_deposition + + surface_upward_mass_flux_of_carbon_dioxide_expressed_as_carbon_due_to_heterotrophic_respiration - - tendency_of_atmosphere_moles_of_nitric_acid_trihydrate_ambient_aerosol_particles + + surface_upward_mass_flux_of_carbon_dioxide_expressed_as_carbon_due_to_respiration_in_soil - - tendency_of_atmosphere_moles_of_sulfate_dry_aerosol_particles + + surface_upward_mass_flux_of_carbon_dioxide_expressed_as_carbon_due_to_plant_respiration - - volume_extinction_coefficient_in_air_due_to_ambient_aerosol_particles + + surface_upward_mass_flux_of_carbon_dioxide_expressed_as_carbon_due_to_plant_respiration_for_biomass_growth - - tendency_of_atmosphere_mass_content_of_sulfate_dry_aerosol_particles_due_to_emission + + surface_upward_mass_flux_of_carbon_dioxide_expressed_as_carbon_due_to_plant_respiration_for_biomass_maintenance - - tendency_of_atmosphere_mass_content_of_dust_dry_aerosol_particles_due_to_emission + + carbon_mass_content_of_forestry_and_agricultural_products - - tendency_of_atmosphere_mass_content_of_particulate_organic_matter_dry_aerosol_particles_expressed_as_carbon_due_to_emission_from_residential_and_commercial_combustion + + carbon_mass_content_of_forestry_and_agricultural_products - - tendency_of_atmosphere_mass_content_of_particulate_organic_matter_dry_aerosol_particles_expressed_as_carbon_due_to_emission_from_waste_treatment_and_disposal + + leaf_mass_content_of_carbon - - tendency_of_atmosphere_mass_content_of_particulate_organic_matter_dry_aerosol_particles_expressed_as_carbon_due_to_emission_from_savanna_and_grassland_fires + + medium_soil_pool_mass_content_of_carbon - - tendency_of_atmosphere_mass_content_of_primary_particulate_organic_matter_dry_aerosol_particles_due_to_emission + + fast_soil_pool_mass_content_of_carbon - - tendency_of_atmosphere_mass_content_of_particulate_organic_matter_dry_aerosol_particles_expressed_as_carbon_due_to_emission_from_land_transport + + miscellaneous_living_matter_mass_content_of_carbon - - tendency_of_atmosphere_mass_content_of_particulate_organic_matter_dry_aerosol_particles_expressed_as_carbon_due_to_emission_from_agricultural_waste_burning + + 
root_mass_content_of_carbon - - tendency_of_atmosphere_mass_content_of_particulate_organic_matter_dry_aerosol_particles_expressed_as_carbon_due_to_emission_from_energy_production_and_distribution + + slow_soil_pool_mass_content_of_carbon - - tendency_of_atmosphere_mass_content_of_particulate_organic_matter_dry_aerosol_particles_expressed_as_carbon_due_to_emission_from_maritime_transport + + soil_mass_content_of_carbon - - tendency_of_atmosphere_mass_content_of_particulate_organic_matter_dry_aerosol_particles_due_to_net_chemical_production_and_emission + + volume_scattering_coefficient_of_radiative_flux_in_air_due_to_dried_aerosol_particles - - tendency_of_atmosphere_mass_content_of_particulate_organic_matter_dry_aerosol_particles_due_to_net_chemical_production_and_emission + + volume_scattering_coefficient_of_radiative_flux_in_air_due_to_ambient_aerosol_particles - - tendency_of_atmosphere_mass_content_of_particulate_organic_matter_dry_aerosol_particles_expressed_as_carbon_due_to_emission_from_forest_fires + + integral_wrt_depth_of_sea_water_practical_salinity - - tendency_of_atmosphere_mass_content_of_particulate_organic_matter_dry_aerosol_particles_expressed_as_carbon_due_to_emission_from_industrial_processes_and_combustion + + integral_wrt_height_of_product_of_eastward_wind_and_specific_humidity - - area_type + + integral_wrt_height_of_product_of_northward_wind_and_specific_humidity - - area_type + + water_flux_into_sea_water_from_rivers - - sea_surface_swell_wave_significant_height + + toa_outgoing_shortwave_flux_assuming_clear_sky_and_no_aerosol - - sea_surface_wind_wave_significant_height + + wood_debris_mass_content_of_carbon - - sea_surface_wave_significant_height + + stratiform_graupel_flux - - rate_of_hydroxyl_radical_destruction_due_to_reaction_with_nmvoc + + water_volume_transport_into_sea_water_from_rivers - - mass_content_of_water_in_soil_layer + + surface_water_evaporation_flux - - mass_content_of_water_in_soil + + sea_ice_temperature_expressed_as_heat_content - - floating_ice_shelf_area_fraction + + sea_ice_temperature_expressed_as_heat_content - - sea_surface_swell_wave_to_direction + + sea_water_potential_temperature_expressed_as_heat_content - - sea_surface_wind_wave_to_direction + + sea_water_potential_temperature_expressed_as_heat_content - - sea_surface_wave_mean_period + + incoming_water_volume_transport_along_river_channel - - sea_surface_wind_wave_mean_period + + surface_upwelling_longwave_flux_in_air - - sea_surface_swell_wave_mean_period + + surface_upwelling_radiance_per_unit_wavelength_in_air - - ocean_mixed_layer_thickness_defined_by_vertical_tracer_diffusivity_deficit + + surface_upwelling_radiance_per_unit_wavelength_in_air_emerging_from_sea_water - - tendency_of_mole_concentration_of_dissolved_inorganic_phosphorus_in_sea_water_due_to_biological_processes + + surface_upwelling_radiance_per_unit_wavelength_in_air_reflected_by_sea_water - - tendency_of_mole_concentration_of_dissolved_inorganic_silicon_in_sea_water_due_to_biological_processes + + surface_upwelling_radiance_per_unit_wavelength_in_sea_water - - atmosphere_mass_content_of_sea_salt_dry_aerosol_particles + + surface_upwelling_radiative_flux_per_unit_wavelength_in_air - - atmosphere_mass_content_of_sea_salt_dry_aerosol_particles + + surface_upwelling_radiative_flux_per_unit_wavelength_in_sea_water - - atmosphere_optical_thickness_due_to_sea_salt_ambient_aerosol_particles + + surface_upwelling_shortwave_flux_in_air - - atmosphere_optical_thickness_due_to_sea_salt_ambient_aerosol_particles + + 
surface_upwelling_shortwave_flux_in_air_assuming_clear_sky_and_no_aerosol - - mass_concentration_of_sea_salt_dry_aerosol_particles_in_air + + upwelling_radiance_per_unit_wavelength_in_air - - mass_concentration_of_sea_salt_dry_aerosol_particles_in_air + + upwelling_radiative_flux_per_unit_wavelength_in_air - - mass_fraction_of_sea_salt_dry_aerosol_particles_in_air + + upwelling_radiative_flux_per_unit_wavelength_in_sea_water - - mass_fraction_of_sea_salt_dry_aerosol_particles_in_air + + upwelling_shortwave_flux_in_air_assuming_clear_sky_and_no_aerosol - - tendency_of_atmosphere_mass_content_of_pm10_sea_salt_dry_aerosol_particles_due_to_dry_deposition + + surface_upwelling_longwave_flux_in_air_assuming_clear_sky - - tendency_of_atmosphere_mass_content_of_pm10_sea_salt_dry_aerosol_particles_due_to_emission + + surface_upwelling_shortwave_flux_in_air_assuming_clear_sky - - tendency_of_atmosphere_mass_content_of_pm10_sea_salt_dry_aerosol_particles_due_to_wet_deposition + + downwelling_photon_flux_per_unit_wavelength_in_sea_water - - tendency_of_atmosphere_mass_content_of_pm2p5_sea_salt_dry_aerosol_particles_due_to_wet_deposition + + downwelling_photon_radiance_per_unit_wavelength_in_sea_water - - tendency_of_atmosphere_mass_content_of_sea_salt_dry_aerosol_particles_due_to_dry_deposition + + downwelling_photon_spherical_irradiance_per_unit_wavelength_in_sea_water - - tendency_of_atmosphere_mass_content_of_sea_salt_dry_aerosol_particles_due_to_dry_deposition + + downwelling_radiance_per_unit_wavelength_in_air - - tendency_of_atmosphere_mass_content_of_sea_salt_dry_aerosol_particles_due_to_gravitational_settling + + downwelling_radiance_per_unit_wavelength_in_sea_water - - tendency_of_atmosphere_mass_content_of_sea_salt_dry_aerosol_particles_due_to_gravitational_settling + + downwelling_radiative_flux_per_unit_wavelength_in_air - - tendency_of_atmosphere_mass_content_of_sea_salt_dry_aerosol_particles_due_to_turbulent_deposition + + downwelling_radiative_flux_per_unit_wavelength_in_sea_water - - tendency_of_atmosphere_mass_content_of_sea_salt_dry_aerosol_particles_due_to_turbulent_deposition + + downwelling_shortwave_flux_in_air_assuming_clear_sky_and_no_aerosol - - tendency_of_atmosphere_mass_content_of_sea_salt_dry_aerosol_particles_due_to_wet_deposition + + downwelling_spherical_irradiance_per_unit_wavelength_in_sea_water - - tendency_of_atmosphere_mass_content_of_sea_salt_dry_aerosol_particles_due_to_wet_deposition + + integral_wrt_time_of_surface_downwelling_longwave_flux_in_air - - atmosphere_optical_thickness_due_to_pm1_ambient_aerosol_particles + + integral_wrt_time_of_surface_downwelling_shortwave_flux_in_air - - mass_concentration_of_pm1_ambient_aerosol_particles_in_air + + surface_downwelling_longwave_flux_in_air - - mass_fraction_of_pm1_ambient_aerosol_particles_in_air + + surface_downwelling_photon_flux_per_unit_wavelength_in_sea_water - - mass_fraction_of_pm1_ambient_aerosol_particles_in_air + + surface_downwelling_photon_radiance_per_unit_wavelength_in_sea_water - - atmosphere_optical_thickness_due_to_pm2p5_ambient_aerosol_particles + + surface_downwelling_photon_spherical_irradiance_per_unit_wavelength_in_sea_water - - mass_concentration_of_pm2p5_ambient_aerosol_particles_in_air + + surface_downwelling_radiance_per_unit_wavelength_in_sea_water - - mass_fraction_of_pm2p5_ambient_aerosol_particles_in_air + + surface_downwelling_radiative_flux_per_unit_wavelength_in_air - - mass_fraction_of_pm2p5_ambient_aerosol_particles_in_air + + 
surface_downwelling_radiative_flux_per_unit_wavelength_in_sea_water - - atmosphere_optical_thickness_due_to_pm10_ambient_aerosol_particles + + surface_downwelling_shortwave_flux_in_air - - mass_concentration_of_pm10_ambient_aerosol_particles_in_air + + surface_downwelling_shortwave_flux_in_air_assuming_clear_sky - - mass_fraction_of_pm10_ambient_aerosol_particles_in_air + + surface_downwelling_shortwave_flux_in_air_assuming_clear_sky_and_no_aerosol - - mass_fraction_of_pm10_ambient_aerosol_particles_in_air + + surface_downwelling_spherical_irradiance_per_unit_wavelength_in_sea_water - - tendency_of_atmosphere_mass_content_of_pm2p5_sea_salt_dry_aerosol_particles_due_to_dry_deposition + + magnitude_of_sea_ice_displacement - - tendency_of_atmosphere_mass_content_of_pm2p5_sea_salt_dry_aerosol_particles_due_to_emission + + tendency_of_sea_ice_amount_due_to_conversion_of_snow_to_sea_ice - - sea_floor_depth_below_mean_sea_level + + outgoing_water_volume_transport_along_river_channel - - sea_surface_height_above_mean_sea_level + + precipitation_flux_onto_canopy - - sea_surface_height_above_mean_sea_level + + water_evaporation_flux_from_canopy - - surface_geostrophic_eastward_sea_water_velocity_assuming_mean_sea_level_for_geoid + + water_evaporation_amount_from_canopy - - surface_geostrophic_eastward_sea_water_velocity_assuming_mean_sea_level_for_geoid + + lwe_stratiform_precipitation_rate - - surface_geostrophic_northward_sea_water_velocity_assuming_mean_sea_level_for_geoid + + lwe_thickness_of_stratiform_precipitation_amount + + + + stratiform_precipitation_amount - - surface_geostrophic_northward_sea_water_velocity_assuming_mean_sea_level_for_geoid + + stratiform_precipitation_flux - - surface_geostrophic_sea_water_x_velocity_assuming_mean_sea_level_for_geoid + + tendency_of_specific_humidity_due_to_stratiform_precipitation - - surface_geostrophic_sea_water_y_velocity_assuming_mean_sea_level_for_geoid + + platform_roll - - tendency_of_sea_surface_height_above_mean_sea_level + + platform_pitch - - surface_geostrophic_northward_sea_water_velocity + + platform_yaw - - surface_geostrophic_eastward_sea_water_velocity + + platform_id - - tendency_of_atmosphere_mass_content_of_nitrogen_compounds_expressed_as_nitrogen_due_to_wet_deposition + + platform_name - - tendency_of_atmosphere_mass_content_of_nitrogen_compounds_expressed_as_nitrogen_due_to_dry_deposition + + water_vapor_partial_pressure_in_air - - tendency_of_atmosphere_mass_content_of_nitrogen_compounds_expressed_as_nitrogen_due_to_deposition + + tendency_of_atmosphere_mole_concentration_of_carbon_monoxide_due_to_chemical_destruction - - atmosphere_absorption_optical_thickness_due_to_sea_salt_ambient_aerosol_particles + + tendency_of_mole_concentration_of_dissolved_inorganic_phosphorus_in_sea_water_due_to_biological_processes - - atmosphere_absorption_optical_thickness_due_to_sea_salt_ambient_aerosol_particles + + tendency_of_mole_concentration_of_dissolved_inorganic_silicon_in_sea_water_due_to_biological_processes - - tendency_of_atmosphere_mass_content_of_sea_salt_dry_aerosol_particles_due_to_emission + + mole_concentration_of_diatoms_expressed_as_nitrogen_in_sea_water - - tendency_of_atmosphere_mass_content_of_sea_salt_dry_aerosol_particles_due_to_emission + + net_primary_mole_productivity_of_biomass_expressed_as_carbon_by_calcareous_phytoplankton - - sea_surface_height_above_geoid + + net_primary_mole_productivity_of_biomass_expressed_as_carbon_by_diatoms - - sea_surface_height_above_geoid + + 
net_primary_mole_productivity_of_biomass_expressed_as_carbon_by_phytoplankton - - sea_floor_depth_below_geoid + + net_primary_mole_productivity_of_biomass_expressed_as_carbon_by_picophytoplankton - - air_pressure_at_mean_sea_level + + net_primary_mole_productivity_of_biomass_expressed_as_carbon_due_to_nitrate_utilization - - lagrangian_tendency_of_air_pressure + + mole_concentration_of_phytoplankton_expressed_as_nitrogen_in_sea_water - - lagrangian_tendency_of_air_pressure + + tendency_of_atmosphere_mass_content_of_nitrogen_compounds_expressed_as_nitrogen_due_to_wet_deposition - - mass_concentration_of_elemental_carbon_dry_aerosol_particles_in_air + + tendency_of_atmosphere_moles_of_carbon_monoxide - - atmosphere_mass_content_of_elemental_carbon_dry_aerosol_particles + + tendency_of_atmosphere_moles_of_carbon_tetrachloride - - mass_fraction_of_elemental_carbon_dry_aerosol_particles_in_air + + tendency_of_atmosphere_mass_content_of_elemental_carbon_dry_aerosol_particles_due_to_emission_from_agricultural_waste_burning - - tendency_of_atmosphere_mass_content_of_elemental_carbon_dry_aerosol_particles_due_to_dry_deposition + + water_vapor_saturation_deficit_in_air - - tendency_of_atmosphere_mass_content_of_elemental_carbon_dry_aerosol_particles_due_to_emission + + mole_fraction_of_inorganic_bromine_in_air - - tendency_of_atmosphere_mass_content_of_elemental_carbon_dry_aerosol_particles_due_to_emission_from_agricultural_waste_burning + + net_primary_mole_productivity_of_biomass_expressed_as_carbon_by_miscellaneous_phytoplankton - - tendency_of_atmosphere_mass_content_of_elemental_carbon_dry_aerosol_particles_due_to_emission_from_energy_production_and_distribution + + tendency_of_atmosphere_moles_of_methane - - tendency_of_atmosphere_mass_content_of_elemental_carbon_dry_aerosol_particles_due_to_emission_from_forest_fires + + mole_fraction_of_noy_expressed_as_nitrogen_in_air - - tendency_of_atmosphere_mass_content_of_elemental_carbon_dry_aerosol_particles_due_to_emission_from_industrial_processes_and_combustion + + mole_fraction_of_methylglyoxal_in_air - - tendency_of_atmosphere_mass_content_of_elemental_carbon_dry_aerosol_particles_due_to_emission_from_land_transport + + atmosphere_moles_of_carbon_tetrachloride - - tendency_of_atmosphere_mass_content_of_elemental_carbon_dry_aerosol_particles_due_to_emission_from_maritime_transport + + floating_ice_shelf_area_fraction - - tendency_of_atmosphere_mass_content_of_elemental_carbon_dry_aerosol_particles_due_to_emission_from_residential_and_commercial_combustion + + stratiform_cloud_area_fraction - - tendency_of_atmosphere_mass_content_of_elemental_carbon_dry_aerosol_particles_due_to_emission_from_savanna_and_grassland_fires + + tendency_of_atmosphere_mass_content_of_sulfate_dry_aerosol_particles_expressed_as_sulfur_due_to_wet_deposition - - tendency_of_atmosphere_mass_content_of_elemental_carbon_dry_aerosol_particles_due_to_emission_from_waste_treatment_and_disposal + + tendency_of_atmosphere_mass_content_of_sulfate_dry_aerosol_particles_expressed_as_sulfur_due_to_wet_deposition - - tendency_of_atmosphere_mass_content_of_elemental_carbon_dry_aerosol_particles_due_to_gravitational_settling + + mass_fraction_of_mercury_dry_aerosol_particles_in_air - - tendency_of_atmosphere_mass_content_of_elemental_carbon_dry_aerosol_particles_due_to_turbulent_deposition + + tendency_of_atmosphere_mass_content_of_mercury_dry_aerosol_particles_due_to_emission - - tendency_of_atmosphere_mass_content_of_elemental_carbon_dry_aerosol_particles_due_to_wet_deposition + + 
carbon_mass_flux_into_litter_and_soil_due_to_anthropogenic_land_use_or_land_cover_change - - tendency_of_mass_concentration_of_elemental_carbon_dry_aerosol_particles_in_air_due_to_emission_from_aviation + + product_of_eastward_wind_and_lagrangian_tendency_of_air_pressure - - sea_water_to_direction + + product_of_northward_wind_and_lagrangian_tendency_of_air_pressure - - integral_wrt_time_of_air_temperature_deficit + + backscattering_ratio_in_air - - integral_wrt_time_of_air_temperature_excess + + histogram_of_backscattering_ratio_in_air_over_height_above_reference_ellipsoid - - integral_wrt_time_of_surface_downward_eastward_stress + + effective_radius_of_convective_cloud_ice_particles - - integral_wrt_time_of_surface_downward_northward_stress + + effective_radius_of_convective_cloud_rain_particles - - integral_wrt_time_of_surface_downward_latent_heat_flux + + effective_radius_of_convective_cloud_snow_particles - - integral_wrt_time_of_surface_downward_sensible_heat_flux + + effective_radius_of_stratiform_cloud_graupel_particles - - integral_wrt_time_of_surface_net_downward_longwave_flux + + effective_radius_of_stratiform_cloud_ice_particles - - integral_wrt_time_of_surface_net_downward_shortwave_flux + + effective_radius_of_stratiform_cloud_rain_particles - - integral_wrt_time_of_toa_net_downward_shortwave_flux + + mass_concentration_of_biomass_burning_dry_aerosol_particles_in_air - - integral_wrt_time_of_toa_outgoing_longwave_flux + + diameter_of_ambient_aerosol_particles - - northward_ocean_freshwater_transport_due_to_parameterized_eddy_advection + + electrical_mobility_diameter_of_ambient_aerosol_particles - - northward_ocean_salt_transport_due_to_parameterized_eddy_advection + + lagrangian_tendency_of_atmosphere_sigma_coordinate - - ocean_heat_x_transport_due_to_parameterized_eddy_advection + + lagrangian_tendency_of_atmosphere_sigma_coordinate - - ocean_heat_y_transport_due_to_parameterized_eddy_advection + + tendency_of_atmosphere_number_content_of_aerosol_particles_due_to_turbulent_deposition - - ocean_mass_x_transport_due_to_advection_and_parameterized_eddy_advection + + atmosphere_moles_of_hcfc22 - - ocean_mass_y_transport_due_to_advection_and_parameterized_eddy_advection + + tendency_of_atmosphere_moles_of_hcfc22 - - ocean_meridional_overturning_mass_streamfunction_due_to_parameterized_eddy_advection + + tendency_of_troposphere_moles_of_hcfc22 - - ocean_y_overturning_mass_streamfunction_due_to_parameterized_eddy_advection + + tendency_of_middle_atmosphere_moles_of_hcc140a - - tendency_of_sea_water_salinity_due_to_parameterized_eddy_advection + + tendency_of_troposphere_moles_of_hcc140a - - tendency_of_sea_water_temperature_due_to_parameterized_eddy_advection + + atmosphere_moles_of_hcc140a - - northward_sea_water_velocity_due_to_parameterized_mesoscale_eddies + + tendency_of_atmosphere_moles_of_hcc140a - - eastward_sea_water_velocity_due_to_parameterized_mesoscale_eddies + + atmosphere_moles_of_halon2402 - - sea_water_x_velocity_due_to_parameterized_mesoscale_eddies + + tendency_of_atmosphere_moles_of_halon2402 - - sea_water_y_velocity_due_to_parameterized_mesoscale_eddies + + atmosphere_moles_of_halon1301 - - upward_sea_water_velocity_due_to_parameterized_mesoscale_eddies + + tendency_of_atmosphere_moles_of_halon1301 - - ocean_tracer_biharmonic_diffusivity_due_to_parameterized_mesoscale_eddy_advection + + atmosphere_moles_of_halon1211 - - ocean_tracer_laplacian_diffusivity_due_to_parameterized_mesoscale_eddy_advection + + tendency_of_atmosphere_moles_of_halon1211 - - 
tendency_of_ocean_eddy_kinetic_energy_content_due_to_parameterized_eddy_advection + + atmosphere_moles_of_halon1202 - - northward_ocean_heat_transport_due_to_parameterized_eddy_advection + + tendency_of_atmosphere_moles_of_halon1202 - - mole_concentration_of_dissolved_inorganic_13C_in_sea_water + + atmosphere_moles_of_cfc12 - - surface_downward_mass_flux_of_13C_dioxide_abiotic_analogue_expressed_as_13C + + tendency_of_atmosphere_moles_of_cfc12 - - surface_downward_mass_flux_of_14C_dioxide_abiotic_analogue_expressed_as_carbon + + atmosphere_moles_of_cfc115 - - mole_concentration_of_dissolved_inorganic_14C_in_sea_water + + tendency_of_atmosphere_moles_of_cfc115 - - stem_mass_content_of_carbon + + atmosphere_moles_of_cfc114 - - subsurface_litter_mass_content_of_carbon + + tendency_of_atmosphere_moles_of_cfc114 - - mass_flux_of_carbon_into_litter_from_vegetation + + atmosphere_moles_of_cfc113 - - litter_mass_content_of_carbon + + tendency_of_atmosphere_moles_of_cfc113 - - surface_litter_mass_content_of_carbon + + atmosphere_moles_of_cfc11 - - eastward_transformed_eulerian_mean_air_velocity + + moles_of_cfc11_per_unit_mass_in_sea_water - - northward_transformed_eulerian_mean_air_velocity + + tendency_of_atmosphere_moles_of_cfc11 - - surface_upward_mass_flux_of_carbon_dioxide_expressed_as_carbon_due_to_heterotrophic_respiration + + effective_radius_of_stratiform_cloud_snow_particles - - surface_upward_mass_flux_of_carbon_dioxide_expressed_as_carbon_due_to_respiration_in_soil + + tendency_of_sea_water_conservative_temperature_expressed_as_heat_content_due_to_parameterized_dianeutral_mixing - - surface_upward_mass_flux_of_carbon_dioxide_expressed_as_carbon_due_to_plant_respiration + + tendency_of_sea_water_potential_temperature_expressed_as_heat_content_due_to_parameterized_dianeutral_mixing - - surface_upward_mass_flux_of_carbon_dioxide_expressed_as_carbon_due_to_plant_respiration_for_biomass_growth + + tendency_of_sea_water_salinity_expressed_as_salt_content_due_to_parameterized_dianeutral_mixing - - surface_upward_mass_flux_of_carbon_dioxide_expressed_as_carbon_due_to_plant_respiration_for_biomass_maintenance + + product_of_lagrangian_tendency_of_air_pressure_and_geopotential_height - - carbon_mass_content_of_forestry_and_agricultural_products + + product_of_lagrangian_tendency_of_air_pressure_and_specific_humidity - - carbon_mass_content_of_forestry_and_agricultural_products + + product_of_lagrangian_tendency_of_air_pressure_and_specific_humidity - - leaf_mass_content_of_carbon + + volume_fraction_of_condensed_water_in_soil - - medium_soil_pool_mass_content_of_carbon + + volume_fraction_of_condensed_water_in_soil_at_critical_point - - fast_soil_pool_mass_content_of_carbon + + volume_fraction_of_condensed_water_in_soil_at_field_capacity - - miscellaneous_living_matter_mass_content_of_carbon + + volume_fraction_of_condensed_water_in_soil_at_wilting_point - - root_mass_content_of_carbon + + integral_wrt_depth_of_product_of_salinity_and_sea_water_density - - slow_soil_pool_mass_content_of_carbon + + sea_water_velocity_to_direction - - soil_mass_content_of_carbon + + sea_water_velocity_to_direction - - tendency_of_atmosphere_mole_concentration_of_carbon_monoxide_due_to_chemical_destruction + + sea_water_velocity_from_direction - - volume_scattering_coefficient_of_radiative_flux_in_air_due_to_dried_aerosol_particles + + atmosphere_mass_content_of_cloud_liquid_water - - volume_scattering_coefficient_of_radiative_flux_in_air_due_to_ambient_aerosol_particles + + 
effective_radius_of_cloud_liquid_water_particles - - integral_wrt_depth_of_sea_water_practical_salinity + + effective_radius_of_convective_cloud_liquid_water_particles - - integral_wrt_depth_of_sea_water_temperature + + effective_radius_of_convective_cloud_liquid_water_particles_at_convective_liquid_water_cloud_top - - integral_wrt_depth_of_sea_water_temperature + + effective_radius_of_stratiform_cloud_liquid_water_particles - - integral_wrt_depth_of_sea_water_temperature + + effective_radius_of_stratiform_cloud_liquid_water_particles_at_stratiform_liquid_water_cloud_top - - integral_wrt_depth_of_sea_water_temperature + + number_concentration_of_convective_cloud_liquid_water_particles_at_convective_liquid_water_cloud_top - - integral_wrt_height_of_product_of_eastward_wind_and_specific_humidity + + number_concentration_of_stratiform_cloud_liquid_water_particles_at_stratiform_liquid_water_cloud_top - - integral_wrt_height_of_product_of_northward_wind_and_specific_humidity + + mass_content_of_cloud_liquid_water_in_atmosphere_layer - - water_flux_into_sea_water_from_rivers + + effective_radius_of_cloud_liquid_water_particles_at_liquid_water_cloud_top - - toa_outgoing_shortwave_flux_assuming_clear_sky_and_no_aerosol + + atmosphere_mass_content_of_convective_cloud_liquid_water - - wood_debris_mass_content_of_carbon + + tendency_of_mass_fraction_of_stratiform_cloud_ice_in_air_due_to_riming_from_cloud_liquid_water - - stratiform_graupel_flux + + tendency_of_mass_fraction_of_stratiform_cloud_ice_in_air_due_to_heterogeneous_nucleation_from_cloud_liquid_water - - water_volume_transport_into_sea_water_from_rivers + + tendency_of_mass_fraction_of_stratiform_cloud_ice_in_air_due_to_melting_to_cloud_liquid_water - - surface_water_evaporation_flux + + growth_limitation_of_diazotrophic_phytoplankton_due_to_solar_irradiance - - water_evapotranspiration_flux + + iron_growth_limitation_of_diazotrophic_phytoplankton - - sea_ice_temperature_expressed_as_heat_content + + mass_concentration_of_diazotrophic_phytoplankton_expressed_as_chlorophyll_in_sea_water - - sea_ice_temperature_expressed_as_heat_content + + mole_concentration_of_diazotrophic_phytoplankton_expressed_as_carbon_in_sea_water - - sea_water_potential_temperature_expressed_as_heat_content + + net_primary_mole_productivity_of_biomass_expressed_as_carbon_by_diazotrophic_phytoplankton - - sea_water_potential_temperature_expressed_as_heat_content + + net_primary_mole_productivity_of_biomass_expressed_as_carbon_by_diazotrophic_phytoplankton - - incoming_water_volume_transport_along_river_channel + + nitrogen_growth_limitation_of_diazotrophic_phytoplankton - - surface_upwelling_longwave_flux_in_air + + tendency_of_mole_concentration_of_particulate_organic_matter_expressed_as_carbon_in_sea_water_due_to_net_primary_production_by_diazotrophic_phytoplankton - - surface_upwelling_radiance_per_unit_wavelength_in_air + + mass_fraction_of_liquid_precipitation_in_air - - surface_upwelling_radiance_per_unit_wavelength_in_air_emerging_from_sea_water + + mass_fraction_of_liquid_precipitation_in_air - - surface_upwelling_radiance_per_unit_wavelength_in_air_reflected_by_sea_water + + area_type - - surface_upwelling_radiance_per_unit_wavelength_in_sea_water + + area_type - - surface_upwelling_radiative_flux_per_unit_wavelength_in_air + + upward_derivative_of_eastward_wind - - surface_upwelling_radiative_flux_per_unit_wavelength_in_sea_water + + upward_derivative_of_northward_wind - - surface_upwelling_shortwave_flux_in_air + + atmosphere_upward_absolute_vorticity - - 
surface_upwelling_shortwave_flux_in_air_assuming_clear_sky_and_no_aerosol + + atmosphere_upward_relative_vorticity - - upwelling_radiance_per_unit_wavelength_in_air + + surface_snow_density - - upwelling_radiative_flux_per_unit_wavelength_in_air + + tendency_of_atmosphere_mass_content_of_water_vapor_due_to_sublimation_of_surface_snow_and_ice - - upwelling_radiative_flux_per_unit_wavelength_in_sea_water + + integral_wrt_time_of_surface_downward_northward_stress - - upwelling_shortwave_flux_in_air_assuming_clear_sky_and_no_aerosol + + integral_wrt_time_of_surface_downward_eastward_stress - - surface_upwelling_longwave_flux_in_air_assuming_clear_sky + + thermal_energy_content_of_surface_snow - - surface_upwelling_shortwave_flux_in_air_assuming_clear_sky + + surface_snow_thickness - - downwelling_photon_flux_per_unit_wavelength_in_sea_water + + liquid_water_content_of_surface_snow - - downwelling_photon_radiance_per_unit_wavelength_in_sea_water + + soot_content_of_surface_snow - - downwelling_photon_spherical_irradiance_per_unit_wavelength_in_sea_water + + biological_taxon_lsid - - downwelling_radiance_per_unit_wavelength_in_air + + water_evapotranspiration_flux - - downwelling_radiance_per_unit_wavelength_in_sea_water + + drainage_amount_through_base_of_soil_model - - downwelling_radiative_flux_per_unit_wavelength_in_air + + moles_of_particulate_inorganic_carbon_per_unit_mass_in_sea_water - - downwelling_radiative_flux_per_unit_wavelength_in_sea_water + + water_flux_into_sea_water_due_to_flux_adjustment - - downwelling_shortwave_flux_in_air_assuming_clear_sky_and_no_aerosol + + heat_flux_into_sea_water_due_to_flux_adjustment - - downwelling_spherical_irradiance_per_unit_wavelength_in_sea_water + + volume_backwards_scattering_coefficient_of_radiative_flux_in_air_due_to_dried_aerosol_particles - - integral_wrt_time_of_surface_downwelling_longwave_flux_in_air + + volume_extinction_coefficient_of_radiative_flux_in_air_due_to_ambient_aerosol_particles - - integral_wrt_time_of_surface_downwelling_shortwave_flux_in_air + + volume_extinction_coefficient_of_radiative_flux_in_air_due_to_ambient_aerosol_particles - - surface_downwelling_longwave_flux_in_air + + volume_extinction_coefficient_of_radiative_flux_in_air_due_to_cloud_particles - - surface_downwelling_photon_flux_per_unit_wavelength_in_sea_water + + volume_attenuated_backwards_scattering_coefficient_of_radiative_flux_in_air_assuming_no_aerosol_or_cloud - - surface_downwelling_photon_radiance_per_unit_wavelength_in_sea_water + + volume_attenuated_backwards_scattering_coefficient_of_radiative_flux_in_air - - surface_downwelling_photon_spherical_irradiance_per_unit_wavelength_in_sea_water + + volume_absorption_coefficient_of_radiative_flux_in_air_due_to_dried_aerosol_particles - - surface_downwelling_radiance_per_unit_wavelength_in_sea_water + + air_equivalent_temperature - - surface_downwelling_radiative_flux_per_unit_wavelength_in_air + + air_equivalent_potential_temperature - - surface_downwelling_radiative_flux_per_unit_wavelength_in_sea_water + + air_pseudo_equivalent_potential_temperature - - surface_downwelling_shortwave_flux_in_air + + air_pseudo_equivalent_temperature - - surface_downwelling_shortwave_flux_in_air_assuming_clear_sky + + surface_temperature - - surface_downwelling_shortwave_flux_in_air_assuming_clear_sky_and_no_aerosol + + surface_temperature - - surface_downwelling_spherical_irradiance_per_unit_wavelength_in_sea_water + + surface_temperature - - magnitude_of_sea_ice_displacement + + temperature_in_ground - - 
tendency_of_sea_ice_amount_due_to_conversion_of_snow_to_sea_ice + + temperature_in_surface_snow - - outgoing_water_volume_transport_along_river_channel + + integral_wrt_depth_of_product_of_conservative_temperature_and_sea_water_density - - precipitation_flux_onto_canopy + + integral_wrt_depth_of_product_of_potential_temperature_and_sea_water_density - - water_evaporation_flux_from_canopy + + integral_wrt_depth_of_sea_water_temperature - - tendency_of_atmosphere_mass_content_of_water_vapor_due_to_sublimation_of_surface_snow_and_ice + + integral_wrt_depth_of_sea_water_temperature - - water_evaporation_amount_from_canopy + + integral_wrt_depth_of_sea_water_temperature - - lwe_stratiform_precipitation_rate + + integral_wrt_depth_of_sea_water_temperature - - lwe_thickness_of_stratiform_precipitation_amount + + universal_thermal_comfort_index - - stratiform_precipitation_amount + + product_of_lagrangian_tendency_of_air_pressure_and_air_temperature - - stratiform_precipitation_flux + + product_of_lagrangian_tendency_of_air_pressure_and_air_temperature - - tendency_of_air_temperature_due_to_stratiform_precipitation + + integral_wrt_time_of_air_temperature_deficit - - tendency_of_specific_humidity_due_to_stratiform_precipitation + + integral_wrt_time_of_air_temperature_excess - - platform_id + + tendency_of_air_temperature_due_to_stratiform_precipitation - - platform_name + + tendency_of_sea_water_temperature_due_to_parameterized_eddy_advection diff --git a/lib/iris/__init__.py b/lib/iris/__init__.py index 77b1cba8e2..a06e36a2e2 100644 --- a/lib/iris/__init__.py +++ b/lib/iris/__init__.py @@ -1,21 +1,8 @@ -# (C) British Crown Copyright 2010 - 2019, Met Office +# Copyright Iris contributors # -# This file is part of Iris. -# -# Iris is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the -# Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Iris is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with Iris. If not, see . -""" -A package for handling multi-dimensional data and associated metadata. +# This file is part of Iris and is released under the BSD license. +# See LICENSE in the root of the repository for full licensing details. +"""A package for handling multi-dimensional data and associated metadata. .. note :: @@ -46,73 +33,80 @@ All the load functions share very similar arguments: - * uris: - Either a single filename/URI expressed as a string, or an - iterable of filenames/URIs. +* uris: + Either a single filename/URI expressed as a string or + :class:`pathlib.PurePath`, or an iterable of filenames/URIs. - Filenames can contain `~` or `~user` abbreviations, and/or - Unix shell-style wildcards (e.g. `*` and `?`). See the - standard library function :func:`os.path.expanduser` and - module :mod:`fnmatch` for more details. + Filenames can contain `~` or `~user` abbreviations, and/or + Unix shell-style wildcards (e.g. `*` and `?`). See the + standard library function :func:`os.path.expanduser` and + module :mod:`fnmatch` for more details. - * constraints: - Either a single constraint, or an iterable of constraints. 
- Each constraint can be either a string, an instance of - :class:`iris.Constraint`, or an instance of - :class:`iris.AttributeConstraint`. If the constraint is a string - it will be used to match against cube.name(). + .. warning:: - .. _constraint_egs: + If supplying a URL, only OPeNDAP Data Sources are supported. - For example:: +* constraints: + Either a single constraint, or an iterable of constraints. + Each constraint can be either a string, an instance of + :class:`iris.Constraint`, or an instance of + :class:`iris.AttributeConstraint`. If the constraint is a string + it will be used to match against cube.name(). - # Load air temperature data. - load_cube(uri, 'air_temperature') + .. _constraint_egs: - # Load data with a specific model level number. - load_cube(uri, iris.Constraint(model_level_number=1)) + For example:: - # Load data with a specific STASH code. - load_cube(uri, iris.AttributeConstraint(STASH='m01s00i004')) + # Load air temperature data. + load_cube(uri, 'air_temperature') - * callback: - A function to add metadata from the originating field and/or URI which - obeys the following rules: + # Load data with a specific model level number. + load_cube(uri, iris.Constraint(model_level_number=1)) - 1. Function signature must be: ``(cube, field, filename)``. - 2. Modifies the given cube inplace, unless a new cube is - returned by the function. - 3. If the cube is to be rejected the callback must raise - an :class:`iris.exceptions.IgnoreCubeException`. + # Load data with a specific STASH code. + load_cube(uri, iris.AttributeConstraint(STASH='m01s00i004')) - For example:: +* callback: + A function to add metadata from the originating field and/or URI which + obeys the following rules: - def callback(cube, field, filename): - # Extract ID from filenames given as: __ - experiment_id = filename.split('__')[1] - experiment_coord = iris.coords.AuxCoord( - experiment_id, long_name='experiment_id') - cube.add_aux_coord(experiment_coord) + 1. Function signature must be: ``(cube, field, filename)``. + 2. Modifies the given cube inplace, unless a new cube is + returned by the function. + 3. If the cube is to be rejected the callback must raise + an :class:`iris.exceptions.IgnoreCubeException`. -""" + For example:: -from __future__ import (absolute_import, division, print_function) -from six.moves import (filter, input, map, range, zip) # noqa -import six + def callback(cube, field, filename): + # Extract ID from filenames given as: __ + experiment_id = filename.split('__')[1] + experiment_coord = iris.coords.AuxCoord( + experiment_id, long_name='experiment_id') + cube.add_aux_coord(experiment_coord) +""" + +from collections.abc import Iterable import contextlib import glob +import importlib import itertools import os.path import threading +from typing import Callable, Literal -import iris.config -import iris.cube import iris._constraints -from iris._deprecation import IrisDeprecation, warn_deprecated -import iris.fileformats +import iris.config import iris.io +from ._deprecation import IrisDeprecation, warn_deprecated + +try: + from ._version import version as __version__ # noqa: F401 +except ModuleNotFoundError: + __version__ = "unknown" + try: import iris_sample_data @@ -120,71 +114,101 @@ def callback(cube, field, filename): iris_sample_data = None -# Iris revision. 
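As a brief editorial aside, the constraint forms shown above also compose: any two constraints can be combined with ``&`` to narrow a load further, and the newly exported ``NameConstraint`` matches on specific name fields. A minimal sketch, assuming a hypothetical filename and variable name::

    import iris

    fname = "air_temp.pp"  # hypothetical file
    # Combine a name constraint with a coordinate-value constraint.
    constraint = iris.Constraint("air_temperature") & iris.Constraint(
        model_level_number=1
    )
    cube = iris.load_cube(fname, constraint)

    # NameConstraint matches on specific name fields; "tas" is illustrative.
    cube = iris.load_cube(fname, iris.NameConstraint(var_name="tas"))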
-__version__ = '3.0.dev0' - # Restrict the names imported when using "from iris import *" -__all__ = ['load', 'load_cube', 'load_cubes', 'load_raw', - 'save', 'Constraint', 'AttributeConstraint', 'sample_data_path', - 'site_configuration', 'Future', 'FUTURE', - 'IrisDeprecation'] +__all__ = [ + "AttributeConstraint", + "Constraint", + "FUTURE", + "Future", + "IrisDeprecation", + "NameConstraint", + "load", + "load_cube", + "load_cubes", + "load_raw", + "sample_data_path", + "save", + "site_configuration", + "use_plugin", +] Constraint = iris._constraints.Constraint AttributeConstraint = iris._constraints.AttributeConstraint +NameConstraint = iris._constraints.NameConstraint class Future(threading.local): """Run-time configuration controller.""" - def __init__(self): - """ - A container for run-time options controls. + def __init__(self, datum_support=False, pandas_ndim=False, save_split_attrs=False): + """Container for run-time options controls. To adjust the values simply update the relevant attribute from within your code. For example:: + # example_future_flag is a fictional example. iris.FUTURE.example_future_flag = False If Iris code is executed with multiple threads, note the values of these options are thread-specific. - .. note:: - - iris.FUTURE.example_future_flag does not exist. It is provided - as an example because there are currently no flags in - iris.Future. + Parameters + ---------- + datum_support : bool, default=False + Opts in to loading coordinate system datum information from NetCDF + files into :class:`~iris.coord_systems.CoordSystem`, wherever + this information is present. + pandas_ndim : bool, default=False + See :func:`iris.pandas.as_data_frame` for details - opts in to the + newer n-dimensional behaviour. + save_split_attrs : bool, default=False + Save "global" and "local" cube attributes to netcdf in appropriately + different ways : "global" ones are saved as dataset attributes, where + possible, while "local" ones are saved as data-variable attributes. + See :func:`iris.fileformats.netcdf.saver.save`. """ - # The flag 'example_future_flag' is provided as a future reference - # for the structure of this class. + # The flag 'example_future_flag' is provided as a reference for the + # structure of this class. + # + # Note that self.__dict__ is used explicitly due to the manner in which + # __setattr__ is overridden. # # self.__dict__['example_future_flag'] = example_future_flag - pass + self.__dict__["datum_support"] = datum_support + self.__dict__["pandas_ndim"] = pandas_ndim + self.__dict__["save_split_attrs"] = save_split_attrs - def __repr__(self): + # TODO: next major release: set IrisDeprecation to subclass + # DeprecationWarning instead of UserWarning. + def __repr__(self): # msg = ('Future(example_future_flag={})') # return msg.format(self.example_future_flag) - msg = ('Future()') - return msg.format() + msg = "Future(datum_support={}, pandas_ndim={}, save_split_attrs={})" + return msg.format(self.datum_support, self.pandas_ndim, self.save_split_attrs) # deprecated_options = {'example_future_flag': 'warning',} - deprecated_options = {} + deprecated_options: dict[str, Literal["error", "warning"]] = {} def __setattr__(self, name, value): if name in self.deprecated_options: level = self.deprecated_options[name] - if level == 'error' and not value: - emsg = ("setting the 'Future' property {prop!r} has been " - "deprecated to be removed in a future release, and " - "deprecated {prop!r} behaviour has been removed. 
" - "Please remove code that sets this property.") + if level == "error" and not value: + emsg = ( + "setting the 'Future' property {prop!r} has been " + "deprecated to be removed in a future release, and " + "deprecated {prop!r} behaviour has been removed. " + "Please remove code that sets this property." + ) raise AttributeError(emsg.format(prop=name)) else: - msg = ("setting the 'Future' property {!r} is deprecated " - "and will be removed in a future release. " - "Please remove code that sets this property.") + msg = ( + "setting the 'Future' property {!r} is deprecated " + "and will be removed in a future release. " + "Please remove code that sets this property." + ) warn_deprecated(msg.format(name)) if name not in self.__dict__: msg = "'Future' object has no attribute {!r}".format(name) @@ -193,29 +217,23 @@ def __setattr__(self, name, value): @contextlib.contextmanager def context(self, **kwargs): - """ - Return a context manager which allows temporary modification of - the option values for the active thread. + """Return context manager for temp modification of option values for the active thread. On entry to the `with` statement, all keyword arguments are applied to the Future object. On exit from the `with` statement, the previous state is restored. For example:: + + # example_future_flag is a fictional example. with iris.FUTURE.context(example_future_flag=False): # ... code that expects some past behaviour - .. note:: - - iris.FUTURE.example_future_flag does not exist and is - provided only as an example since there are currently no - flags in Future. - """ # Save the current context current_state = self.__dict__.copy() # Update the state - for name, value in six.iteritems(kwargs): + for name, value in kwargs.items(): setattr(self, name, value) try: yield @@ -231,7 +249,10 @@ def context(self, **kwargs): # Initialise the site configuration dictionary. #: Iris site configuration dictionary. -site_configuration = {} +site_configuration: dict[ + Literal["cf_profile", "cf_patch", "cf_patch_conventions"], + Callable | Literal[False] | None, +] = {} try: from iris.site_config import update as _update @@ -242,58 +263,65 @@ def context(self, **kwargs): def _generate_cubes(uris, callback, constraints): - """Returns a generator of cubes given the URIs and a callback.""" - if isinstance(uris, six.string_types): + """Return a generator of cubes given the URIs and a callback.""" + if isinstance(uris, str) or not isinstance(uris, Iterable): + # Make a string, or other single item, into an iterable. 
uris = [uris] # Group collections of uris by their iris handler # Create list of tuples relating schemes to part names uri_tuples = sorted(iris.io.decode_uri(uri) for uri in uris) - for scheme, groups in (itertools.groupby(uri_tuples, key=lambda x: x[0])): + for scheme, groups in itertools.groupby(uri_tuples, key=lambda x: x[0]): # Call each scheme handler with the appropriate URIs - if scheme == 'file': + if scheme == "file": part_names = [x[1] for x in groups] for cube in iris.io.load_files(part_names, callback, constraints): yield cube - elif scheme in ['http', 'https']: - urls = [':'.join(x) for x in groups] + elif scheme in ["http", "https"]: + urls = [":".join(x) for x in groups] for cube in iris.io.load_http(urls, callback): yield cube + elif scheme == "data": + data_objects = [x[1] for x in groups] + for cube in iris.io.load_data_objects(data_objects, callback): + yield cube else: - raise ValueError('Iris cannot handle the URI scheme: %s' % scheme) + raise ValueError("Iris cannot handle the URI scheme: %s" % scheme) def _load_collection(uris, constraints=None, callback=None): + from iris.cube import _CubeFilterCollection + try: cubes = _generate_cubes(uris, callback, constraints) - result = iris.cube._CubeFilterCollection.from_cubes(cubes, constraints) + result = _CubeFilterCollection.from_cubes(cubes, constraints) except EOFError as e: raise iris.exceptions.TranslationError( - "The file appears empty or incomplete: {!r}".format(str(e))) + "The file appears empty or incomplete: {!r}".format(str(e)) + ) return result def load(uris, constraints=None, callback=None): - """ - Loads any number of Cubes for each constraint. + """Load any number of Cubes for each constraint. For a full description of the arguments, please see the module documentation for :mod:`iris`. - Args: - - * uris: - One or more filenames/URIs. - - Kwargs: - - * constraints: + Parameters + ---------- + uris : str or :class:`pathlib.PurePath` + One or more filenames/URIs, as a string or :class:`pathlib.PurePath`. + If supplying a URL, only OPeNDAP Data Sources are supported. + constraints : optional One or more constraints. - * callback: + callback : optional A modifier/filter function. - Returns: + Returns + ------- + :class:`iris.cube.CubeList` An :class:`iris.cube.CubeList`. Note that there is no inherent order to this :class:`iris.cube.CubeList` and it should be treated as if it were random. @@ -303,31 +331,29 @@ def load(uris, constraints=None, callback=None): def load_cube(uris, constraint=None, callback=None): - """ - Loads a single cube. + """Load a single cube. For a full description of the arguments, please see the module documentation for :mod:`iris`. - Args: - - * uris: - One or more filenames/URIs. - - Kwargs: - - * constraints: + Parameters + ---------- + uris : + One or more filenames/URIs, as a string or :class:`pathlib.PurePath`. + If supplying a URL, only OPeNDAP Data Sources are supported. + constraint : optional A constraint. - * callback: + callback : optional A modifier/filter function. - Returns: - An :class:`iris.cube.Cube`. 
+ Returns + ------- + :class:`iris.cube.Cube` """ constraints = iris._constraints.list_of_constraints(constraint) if len(constraints) != 1: - raise ValueError('only a single constraint is allowed') + raise ValueError("only a single constraint is allowed") cubes = _load_collection(uris, constraints, callback).cubes() @@ -336,31 +362,30 @@ def load_cube(uris, constraint=None, callback=None): except iris.exceptions.MergeError as e: raise iris.exceptions.ConstraintMismatchError(str(e)) except ValueError: - raise iris.exceptions.ConstraintMismatchError('no cubes found') + raise iris.exceptions.ConstraintMismatchError("no cubes found") return cube def load_cubes(uris, constraints=None, callback=None): - """ - Loads exactly one Cube for each constraint. + """Load exactly one Cube for each constraint. For a full description of the arguments, please see the module documentation for :mod:`iris`. - Args: - - * uris: - One or more filenames/URIs. - - Kwargs: - - * constraints: + Parameters + ---------- + uris : + One or more filenames/URIs, as a string or :class:`pathlib.PurePath`. + If supplying a URL, only OPeNDAP Data Sources are supported. + constraints : optional One or more constraints. - * callback: + callback : optional A modifier/filter function. - Returns: + Returns + ------- + :class:`iris.cube.CubeList` An :class:`iris.cube.CubeList`. Note that there is no inherent order to this :class:`iris.cube.CubeList` and it should be treated as if it were random. @@ -372,17 +397,16 @@ def load_cubes(uris, constraints=None, callback=None): # Make sure we have exactly one merged cube per constraint bad_pairs = [pair for pair in collection.pairs if len(pair) != 1] if bad_pairs: - fmt = ' {} -> {} cubes' + fmt = " {} -> {} cubes" bits = [fmt.format(pair.constraint, len(pair)) for pair in bad_pairs] - msg = '\n' + '\n'.join(bits) + msg = "\n" + "\n".join(bits) raise iris.exceptions.ConstraintMismatchError(msg) return collection.cubes() def load_raw(uris, constraints=None, callback=None): - """ - Loads non-merged cubes. + """Load non-merged cubes. This function is provided for those occasions where the automatic combination of cubes into higher-dimensional cubes is undesirable. @@ -393,23 +417,23 @@ def load_raw(uris, constraints=None, callback=None): For a full description of the arguments, please see the module documentation for :mod:`iris`. - Args: - - * uris: - One or more filenames/URIs. - - Kwargs: - - * constraints: + Parameters + ---------- + uris : + One or more filenames/URIs, as a string or :class:`pathlib.PurePath`. + If supplying a URL, only OPeNDAP Data Sources are supported. + constraints : optional One or more constraints. - * callback: + callback : optional A modifier/filter function. - Returns: - An :class:`iris.cube.CubeList`. + Returns + ------- + :class:`iris.cube.CubeList` """ from iris.fileformats.um._fast_load import _raw_structured_loading + with _raw_structured_loading(): return _load_collection(uris, constraints, callback).cubes() @@ -418,8 +442,7 @@ def load_raw(uris, constraints=None, callback=None): def sample_data_path(*path_to_join): - """ - Given the sample data resource, returns the full path to the file. + """Given the sample data resource, returns the full path to the file. .. note:: @@ -430,18 +453,48 @@ def sample_data_path(*path_to_join): """ target = os.path.join(*path_to_join) if os.path.isabs(target): - raise ValueError('Absolute paths, such as {!r}, are not supported.\n' - 'NB. This function is only for locating files in the ' - 'iris sample data collection. 
It is not needed or ' - 'appropriate for general file access.'.format(target)) + raise ValueError( + "Absolute paths, such as {!r}, are not supported.\n" + "NB. This function is only for locating files in the " + "iris sample data collection. It is not needed or " + "appropriate for general file access.".format(target) + ) if iris_sample_data is not None: target = os.path.join(iris_sample_data.path, target) else: - raise ImportError("Please install the 'iris-sample-data' package to " - "access sample data.") + raise ImportError( + "Please install the 'iris-sample-data' package to access sample data." + ) if not glob.glob(target): - raise ValueError('Sample data file(s) at {!r} not found.\n' - 'NB. This function is only for locating files in the ' - 'iris sample data collection. It is not needed or ' - 'appropriate for general file access.'.format(target)) + raise ValueError( + "Sample data file(s) at {!r} not found.\n" + "NB. This function is only for locating files in the " + "iris sample data collection. It is not needed or " + "appropriate for general file access.".format(target) + ) return target + + +def use_plugin(plugin_name): + """Import a plugin. + + Parameters + ---------- + plugin_name : str + Name of plugin. + + Examples + -------- + The following:: + + use_plugin("my_plugin") + + is equivalent to:: + + import iris.plugins.my_plugin + + This is useful for plugins that are not used directly, but instead do all + their setup on import. In this case, style checkers would not know the + significance of the import statement and warn that it is an unused import. + """ + importlib.import_module(f"iris.plugins.{plugin_name}") diff --git a/lib/iris/_concatenate.py b/lib/iris/_concatenate.py index 9f37fd24b2..841cecfd53 100644 --- a/lib/iris/_concatenate.py +++ b/lib/iris/_concatenate.py @@ -1,50 +1,39 @@ -# (C) British Crown Copyright 2013 - 2017, Met Office +# Copyright Iris contributors # -# This file is part of Iris. -# -# Iris is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the -# Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Iris is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with Iris. If not, see . -""" -Automatic concatenation of multiple cubes over one or more existing dimensions. - -""" +# This file is part of Iris and is released under the BSD license. +# See LICENSE in the root of the repository for full licensing details. 
+"""Automatic concatenation of multiple cubes over one or more existing dimensions.""" -from __future__ import (absolute_import, division, print_function) -from six.moves import (filter, input, map, range, zip) # noqa -import six - -from collections import defaultdict, namedtuple -from copy import deepcopy +from collections import namedtuple +from collections.abc import Mapping, Sequence +import itertools +from typing import Any +import warnings +import dask import dask.array as da import numpy as np +from xxhash import xxh3_64 +from iris._lazy_data import concatenate as concatenate_arrays import iris.coords +from iris.coords import AncillaryVariable, AuxCoord, CellMeasure, DimCoord import iris.cube -from iris.util import guess_coord_axis, array_equal - +import iris.exceptions +from iris.util import array_equal, guess_coord_axis +import iris.warnings # # TODO: # # * Cope with auxiliary coordinate factories. # -# * Allow concatentation over a user specified dimension. +# * Allow concatenation over a user specified dimension. # # Restrict the names imported from this namespace. -__all__ = ['concatenate'] +__all__ = ["concatenate"] # Direction of dimension coordinate value order. _CONSTANT = 0 @@ -52,19 +41,16 @@ _INCREASING = 1 -class _CoordAndDims(namedtuple('CoordAndDims', - ['coord', 'dims'])): - """ +class _CoordAndDims(namedtuple("CoordAndDims", ["coord", "dims"])): + """Container for a coordinate and the associated data dimension(s). + Container for a coordinate and the associated data dimension(s) spanned over a :class:`iris.cube.Cube`. - Args: - - * coord: - A :class:`iris.coords.DimCoord` or :class:`iris.coords.AuxCoord` - coordinate instance. - - * dims: + Parameters + ---------- + coord : :class:`iris.coords.DimCoord` or :class:`iris.coords.AuxCoord` + dims : tuple A tuple of the data dimension(s) spanned by the coordinate. """ @@ -72,58 +58,55 @@ class _CoordAndDims(namedtuple('CoordAndDims', __slots__ = () -class _CoordMetaData(namedtuple('CoordMetaData', - ['defn', 'dims', 'points_dtype', - 'bounds_dtype', 'kwargs'])): - """ - Container for the metadata that defines a dimension or auxiliary - coordinate. - - Args: +class _CoordMetaData( + namedtuple( + "CoordMetaData", + ["defn", "dims", "points_dtype", "bounds_dtype", "kwargs"], + ) +): + """Container for the metadata that defines a dimension or auxiliary coordinate. - * defn: - The :class:`iris.coords.CoordDefn` metadata that represents a + Parameters + ---------- + defn : :class:`iris.common.CoordMetadata` + The :class:`iris.common.CoordMetadata` metadata that represents a coordinate. - - * dims: + dims : The dimension(s) associated with the coordinate. - - * points_dtype: + points_dtype : :class:`np.dtype` The points data :class:`np.dtype` of an associated coordinate. - - * bounds_dtype: + bounds_dtype : :class:`np.dtype` The bounds data :class:`np.dtype` of an associated coordinate. - - * kwargs: + **kwargs : dict, optional A dictionary of key/value pairs required to define a coordinate. """ - def __new__(cls, coord, dims): - """ - Create a new :class:`_CoordMetaData` instance. - - Args: - * coord: - The :class:`iris.coord.DimCoord` or :class:`iris.coord.AuxCoord`. + def __new__(mcs, coord, dims): + """Create a new :class:`_CoordMetaData` instance. - * dims: + Parameters + ---------- + coord : :class:`iris.coord.DimCoord` or :class:`iris.coord.AuxCoord` + dims : The dimension(s) associated with the coordinate. - Returns: - The new class instance. + Returns + ------- + The new class instance. 
""" - defn = coord._as_defn() - points_dtype = coord.points.dtype - bounds_dtype = coord.bounds.dtype if coord.bounds is not None \ - else None + defn = coord.metadata + points_dtype = coord.core_points().dtype + bounds_dtype = ( + coord.core_bounds().dtype if coord.core_bounds() is not None else None + ) kwargs = {} # Add scalar flag metadata. - kwargs['scalar'] = coord.points.size == 1 + kwargs["scalar"] = coord.core_points().size == 1 # Add circular flag metadata for dimensional coordinates. - if hasattr(coord, 'circular'): - kwargs['circular'] = coord.circular + if hasattr(coord, "circular"): + kwargs["circular"] = coord.circular if isinstance(coord, iris.coords.DimCoord): # Mix the monotonic ordering into the metadata. if coord.points[0] == coord.points[-1]: @@ -132,24 +115,21 @@ def __new__(cls, coord, dims): order = _INCREASING else: order = _DECREASING - kwargs['order'] = order - metadata = super(_CoordMetaData, cls).__new__(cls, defn, dims, - points_dtype, - bounds_dtype, - kwargs) + kwargs["order"] = order + metadata = super().__new__(mcs, defn, dims, points_dtype, bounds_dtype, kwargs) return metadata __slots__ = () def __hash__(self): - return super(_CoordMetaData, self).__hash__() + return super().__hash__() def __eq__(self, other): result = NotImplemented if isinstance(other, _CoordMetaData): sprops, oprops = self._asdict(), other._asdict() # Ignore "kwargs" meta-data for the first comparison. - sprops['kwargs'] = oprops['kwargs'] = None + sprops["kwargs"] = oprops["kwargs"] = None result = sprops == oprops if result: skwargs, okwargs = self.kwargs.copy(), other.kwargs.copy() @@ -157,11 +137,11 @@ def __eq__(self, other): # The monotonic "order" must be _INCREASING or _DECREASING if # the DimCoord is NOT "scalar". Otherwise, if the DimCoord is # "scalar" then the "order" must be _CONSTANT. - if skwargs['scalar'] or okwargs['scalar']: + if skwargs["scalar"] or okwargs["scalar"]: # We don't care about the monotonic "order" given that # at least one coordinate is a scalar coordinate. - skwargs['scalar'] = okwargs['scalar'] = None - skwargs['order'] = okwargs['order'] = None + skwargs["scalar"] = okwargs["scalar"] = None + skwargs["order"] = okwargs["order"] = None result = skwargs == okwargs return result @@ -176,55 +156,129 @@ def name(self): return self.defn.name() -class _SkeletonCube(namedtuple('SkeletonCube', - ['signature', 'data'])): +class _DerivedCoordAndDims( + namedtuple("DerivedCoordAndDims", ["coord", "dims", "aux_factory"]) +): + """Container for a derived coordinate and dimensions(s). + + Container for a derived coordinate, the associated AuxCoordFactory, and the + associated data dimension(s) spanned over a :class:`iris.cube.Cube`. + + Parameters + ---------- + coord : :class:`iris.coord.DimCoord` or :class:`iris.coord.AuxCoord` + dims : tuple + A tuple of the data dimension(s) spanned by the coordinate. + aux_factory : :class:`iris.aux_factory.AuxCoordFactory` + """ - Basis of a source-cube, containing the associated coordinate metadata, - coordinates and cube data payload. - Args: + __slots__ = () - * signature: - The :class:`_CoordSignature` of an associated source-cube. + def __eq__(self, other): + """Do not take aux factories into account for equality.""" + result = NotImplemented + if isinstance(other, _DerivedCoordAndDims): + equal_coords = self.coord == other.coord + equal_dims = self.dims == other.dims + result = equal_coords and equal_dims + return result - * data: - The data payload of an associated :class:`iris.cube.Cube` source-cube. 
+ +class _OtherMetaData(namedtuple("OtherMetaData", ["defn", "dims"])): + """Container for the metadata that defines a cell measure or ancillary variable. + + Parameters + ---------- + defn : :class:`iris.coords._DMDefn` or :class:`iris.coords._CellMeasureDefn` + The :class:`iris.coords._DMDefn` or :class:`iris.coords._CellMeasureDefn` + metadata that represents a coordinate. + dims : + The dimension(s) associated with the coordinate. """ + def __new__(cls, ancil, dims): + """Create a new :class:`_OtherMetaData` instance. + + Parameters + ---------- + ancil : :class:`iris.coord.CellMeasure` or :class:`iris.coord.AncillaryVariable` + dims : + The dimension(s) associated with ancil. + + Returns + ------- + The new class instance. + + """ + defn = ancil.metadata + metadata = super().__new__(cls, defn, dims) + return metadata + __slots__ = () + def __hash__(self): + return super().__hash__() -class _Extent(namedtuple('Extent', - ['min', 'max'])): - """ - Container representing the limits of a one-dimensional extent/range. + def __eq__(self, other): + result = NotImplemented + if isinstance(other, _OtherMetaData): + result = self._asdict() == other._asdict() + return result - Args: + def __ne__(self, other): + result = self.__eq__(other) + if result is not NotImplemented: + result = not result + return result - * min: - The minimum value of the extent. + def name(self): + """Get the name from the coordinate definition.""" + return self.defn.name() - * max: - The maximum value of the extent. + +class _SkeletonCube(namedtuple("SkeletonCube", ["signature", "data"])): + """Basis of a source-cube. + + Basis of a source-cube, containing the associated coordinate metadata, + coordinates and cube data payload. + + Parameters + ---------- + signature : :class:`_CoordSignature` + The :class:`_CoordSignature` of an associated source-cube. + data : + The data payload of an associated :class:`iris.cube.Cube` source-cube. """ __slots__ = () -class _CoordExtent(namedtuple('CoordExtent', - ['points', 'bounds'])): +class _Extent(namedtuple("Extent", ["min", "max"])): + """Container representing the limits of a one-dimensional extent/range. + + Parameters + ---------- + min : + The minimum value of the extent. + max : + The maximum value of the extent. + """ - Container representing the points and bounds extent of a one dimensional - coordinate. - Args: + __slots__ = () - * points: - The :class:`_Extent` of the coordinate point values. - * bounds: +class _CoordExtent(namedtuple("CoordExtent", ["points", "bounds"])): + """Container representing the points and bounds extent of a one dimensional coordinate. + + Parameters + ---------- + points : :class:`_Extent` + The :class:`_Extent` of the coordinate point values. + bounds : A list containing the :class:`_Extent` of the coordinate lower bound and the upper bound. Defaults to None if no associated bounds exist for the coordinate. @@ -234,61 +288,357 @@ class _CoordExtent(namedtuple('CoordExtent', __slots__ = () -def concatenate(cubes, error_on_mismatch=False, check_aux_coords=True): +def _hash_ndarray(a: np.ndarray) -> np.ndarray: + """Compute a hash from a numpy array. + + Calculates a 64-bit non-cryptographic hash of the provided array, using + the fast ``xxhash`` hashing algorithm. + + Parameters + ---------- + a : + The array to hash. + + Returns + ------- + numpy.ndarray : + An array of shape (1,) containing the hash value. + """ - Concatenate the provided cubes over common existing dimensions. 
+ # Include the array dtype as it is not preserved by `ndarray.tobytes()`. + hash = xxh3_64(f"dtype={a.dtype}".encode("utf-8")) + + # Hash the bytes representing the array data. + hash.update(b"data=") + if isinstance(a, np.ma.MaskedArray): + # Hash only the unmasked data + hash.update(a.compressed().tobytes()) + # Hash the mask + hash.update(b"mask=") + hash.update(a.mask.tobytes()) + else: + hash.update(a.tobytes()) + return np.frombuffer(hash.digest(), dtype=np.int64) + + +def _hash_chunk( + x_chunk: np.ndarray, + axis: tuple[int] | None, + keepdims: bool, +) -> np.ndarray: + """Compute a hash from a numpy array. + + This function can be applied to each chunk or intermediate chunk in + :func:`~dask.array.reduction`. It preserves the number of input dimensions + to facilitate combining intermediate results into intermediate chunks. + + Parameters + ---------- + x_chunk : + The array to hash. + axis : + Unused but required by :func:`~dask.array.reduction`. + keepdims : + Unused but required by :func:`~dask.array.reduction`. + + Returns + ------- + numpy.ndarray : + An array containing the hash value. - Args: + """ + return _hash_ndarray(x_chunk).reshape((1,) * x_chunk.ndim) - * cubes: - An iterable containing one or more :class:`iris.cube.Cube` instances - to be concatenated together. - Kwargs: +def _hash_aggregate( + x_chunk: np.ndarray, + axis: tuple[int] | None, + keepdims: bool, +) -> np.int64: + """Compute a hash from a numpy array. + + This function can be applied as the final step in :func:`~dask.array.reduction`. + + Parameters + ---------- + x_chunk : + The array to hash. + axis : + Unused but required by :func:`~dask.array.reduction`. + keepdims : + Unused but required by :func:`~dask.array.reduction`. + + Returns + ------- + np.int64 : + The hash value. + + """ + (result,) = _hash_ndarray(x_chunk) + return result + + +def _hash_array(a: da.Array | np.ndarray) -> np.int64: + """Calculate a hash representation of the provided array. + + Calculates a 64-bit non-cryptographic hash of the provided array, using + the fast ``xxhash`` hashing algorithm. + + Note that the computed hash depends on how the array is chunked. + + Parameters + ---------- + a : + The array that requires to have its hexdigest calculated. + + Returns + ------- + np.int64 + The array's hash. + + """ + if isinstance(a, da.Array): + # Use :func:`~dask.array.reduction` to compute a hash from a Dask array. + # + # A hash of each input chunk will be computed by the `chunk` function + # and those hashes will be combined into one or more intermediate chunks. + # If there are multiple intermediate chunks, a hash for each intermediate + # chunk will be computed by the `combine` function and the + # results will be combined into a new layer of intermediate chunks. This + # will be repeated until only a single intermediate chunk remains. + # Finally, a single hash value will be computed from the last + # intermediate chunk by the `aggregate` function. + result = da.reduction( + a, + chunk=_hash_chunk, + combine=_hash_chunk, + aggregate=_hash_aggregate, + keepdims=False, + meta=np.empty(tuple(), dtype=np.int64), + dtype=np.int64, + ) + else: + result = _hash_aggregate(a, None, False) + return result + + +class _ArrayHash(namedtuple("ArrayHash", ["value", "chunks"])): + """Container for a hash value and the chunks used when computing it. + + Parameters + ---------- + value : :class:`np.int64` + The hash value. + chunks : tuple + The chunks the array had when the hash was computed. 
+ """ + + __slots__ = () + + def __eq__(self, other: Any) -> bool: + if not isinstance(other, self.__class__): + raise TypeError(f"Unable to compare {repr(self)} to {repr(other)}") + + def shape(chunks): + return tuple(sum(c) for c in chunks) - * error_on_mismatch: + if shape(self.chunks) == shape(other.chunks): + if self.chunks != other.chunks: + raise ValueError( + "Unable to compare arrays with different chunks: " + f"{self.chunks} != {other.chunks}" + ) + result = self.value == other.value + else: + result = False + return result + + +def _array_id( + coord: DimCoord | AuxCoord | AncillaryVariable | CellMeasure, + bound: bool, +) -> str: + """Get a unique key for looking up arrays associated with coordinates.""" + return f"{id(coord)}{bound}" + + +def _compute_hashes( + arrays: Mapping[str, np.ndarray | da.Array], +) -> dict[str, _ArrayHash]: + """Compute hashes for the arrays that will be compared. + + Two arrays are considered equal if each unmasked element compares equal + and the masks are equal. However, hashes depend on chunking and dtype. + Therefore, arrays with the same shape are rechunked so they have the same + chunks and arrays with numerical dtypes are cast up to the same dtype before + computing the hashes. + + Parameters + ---------- + arrays : + A mapping with key-array pairs. + + Returns + ------- + dict[str, _ArrayHash] : + An dictionary of hashes. + + """ + hashes = {} + + def is_numerical(dtype): + return np.issubdtype(dtype, np.bool_) or np.issubdtype(dtype, np.number) + + def group_key(item): + array_id, a = item + if is_numerical(a.dtype): + dtype = "numerical" + else: + dtype = str(a.dtype) + return a.shape, dtype + + sorted_arrays = sorted(arrays.items(), key=group_key) + for _, group_iter in itertools.groupby(sorted_arrays, key=group_key): + array_ids, group = zip(*group_iter) + # Unify dtype for numerical arrays, as the hash depends on it + if is_numerical(group[0].dtype): + dtype = np.result_type(*group) + same_dtype_arrays = tuple(a.astype(dtype) for a in group) + else: + same_dtype_arrays = group + if any(isinstance(a, da.Array) for a in same_dtype_arrays): + # Unify chunks as the hash depends on the chunks. + indices = tuple(range(group[0].ndim)) + # Because all arrays in a group have the same shape, `indices` + # are the same for all of them. Providing `indices` as a tuple + # instead of letters is easier to do programmatically. + argpairs = [(a, indices) for a in same_dtype_arrays] + __, rechunked_arrays = da.core.unify_chunks(*itertools.chain(*argpairs)) + else: + rechunked_arrays = same_dtype_arrays + for array_id, rechunked in zip(array_ids, rechunked_arrays): + if isinstance(rechunked, da.Array): + chunks = rechunked.chunks + else: + chunks = tuple((i,) for i in rechunked.shape) + hashes[array_id] = (_hash_array(rechunked), chunks) + + (hashes,) = dask.compute(hashes) + return {k: _ArrayHash(*v) for k, v in hashes.items()} + + +def concatenate( + cubes: Sequence[iris.cube.Cube], + error_on_mismatch: bool = False, + check_aux_coords: bool = True, + check_cell_measures: bool = True, + check_ancils: bool = True, + check_derived_coords: bool = True, +) -> iris.cube.CubeList: + """Concatenate the provided cubes over common existing dimensions. + + Parameters + ---------- + cubes : iterable of :class:`iris.cube.Cube` + An iterable containing one or more :class:`iris.cube.Cube` instances + to be concatenated together. + error_on_mismatch : bool, default=False If True, raise an informative :class:`~iris.exceptions.ContatenateError` if registration fails. 
- - Returns: - A :class:`iris.cube.CubeList` of concatenated :class:`iris.cube.Cube` - instances. + check_aux_coords : bool, default=True + Checks if the points and bounds of auxiliary coordinates of the cubes + match. This check is not applied to auxiliary coordinates that span the + dimension the concatenation is occurring along. Defaults to True. + check_cell_measures : bool, default=True + Checks if the data of cell measures of the cubes match. This check is + not applied to cell measures that span the dimension the concatenation + is occurring along. Defaults to True. + check_ancils : bool, default=True + Checks if the data of ancillary variables of the cubes match. This + check is not applied to ancillary variables that span the dimension the + concatenation is occurring along. Defaults to True. + check_derived_coords : bool, default=True + Checks if the points and bounds of derived coordinates of the cubes + match. This check is not applied to derived coordinates that span the + dimension the concatenation is occurring along. Note that differences + in scalar coordinates and dimensional coordinates used to derive the + coordinate are still checked. Checks for auxiliary coordinates used to + derive the coordinates can be ignored with `check_aux_coords`. Defaults + to True. + + Returns + ------- + :class:`iris.cube.CubeList` + A :class:`iris.cube.CubeList` of concatenated :class:`iris.cube.Cube` instances. """ - proto_cubes_by_name = defaultdict(list) + cube_signatures = [_CubeSignature(cube) for cube in cubes] + + proto_cubes: list[_ProtoCube] = [] # Initialise the nominated axis (dimension) of concatenation # which requires to be negotiated. axis = None + # Compute hashes for parallel array comparison. + arrays = {} + + def add_coords(cube_signature: _CubeSignature, coord_type: str) -> None: + for coord_and_dims in getattr(cube_signature, coord_type): + coord = coord_and_dims.coord + array_id = _array_id(coord, bound=False) + if isinstance(coord, (DimCoord, AuxCoord)): + arrays[array_id] = coord.core_points() + if coord.has_bounds(): + bound_array_id = _array_id(coord, bound=True) + arrays[bound_array_id] = coord.core_bounds() + else: + arrays[array_id] = coord.core_data() + + for cube_signature in cube_signatures: + if check_aux_coords: + add_coords(cube_signature, "aux_coords_and_dims") + if check_derived_coords: + add_coords(cube_signature, "derived_coords_and_dims") + if check_cell_measures: + add_coords(cube_signature, "cell_measures_and_dims") + if check_ancils: + add_coords(cube_signature, "ancillary_variables_and_dims") + + hashes = _compute_hashes(arrays) + # Register each cube with its appropriate proto-cube. - for cube in cubes: - name = cube.standard_name or cube.long_name - proto_cubes = proto_cubes_by_name[name] + for cube_signature in cube_signatures: registered = False # Register cube with an existing proto-cube. for proto_cube in proto_cubes: - registered = proto_cube.register(cube, axis, error_on_mismatch, - check_aux_coords) + registered = proto_cube.register( + cube_signature, + hashes, + axis, + error_on_mismatch, + check_aux_coords, + check_cell_measures, + check_ancils, + check_derived_coords, + ) if registered: axis = proto_cube.axis break # Create a new proto-cube for an unregistered cube. if not registered: - proto_cubes.append(_ProtoCube(cube)) + proto_cubes.append(_ProtoCube(cube_signature)) # Construct a concatenated cube from each of the proto-cubes. concatenated_cubes = iris.cube.CubeList() # Emulate Python 2 behaviour. 
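For orientation, here is a minimal end-to-end sketch of the behaviour the function above implements, driven through the public :meth:`iris.cube.CubeList.concatenate_cube` entry point; the cube contents are illustrative only::

    import numpy as np
    from iris.coords import DimCoord
    from iris.cube import Cube, CubeList

    def time_slab(start):
        # Two cubes identical except for their "time" coordinate values.
        time = DimCoord(
            np.arange(start, start + 2, dtype=np.float64),
            standard_name="time",
            units="hours since 1970-01-01",
        )
        lat = DimCoord(
            np.array([-45.0, 0.0, 45.0]), standard_name="latitude", units="degrees"
        )
        return Cube(
            np.zeros((2, 3)),
            standard_name="air_temperature",
            units="K",
            dim_coords_and_dims=[(time, 0), (lat, 1)],
        )

    cubes = CubeList([time_slab(0), time_slab(2)])
    # "time" is negotiated as the only candidate axis; the result is one cube.
    cube = cubes.concatenate_cube()
    print(cube.shape)  # (4, 3)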
- def _none_sort(item): - return (item is not None, item) + def _none_sort(proto_cube): + return (proto_cube.name is not None, proto_cube.name) - for name in sorted(proto_cubes_by_name, key=_none_sort): - for proto_cube in proto_cubes_by_name[name]: - # Construct the concatenated cube. - concatenated_cubes.append(proto_cube.concatenate()) + for proto_cube in sorted(proto_cubes, key=_none_sort): + concatenated_cubes.append(proto_cube.concatenate()) # Perform concatenation until we've reached an equilibrium. count = len(concatenated_cubes) @@ -298,20 +648,20 @@ def _none_sort(item): return concatenated_cubes -class _CubeSignature(object): - """ +class _CubeSignature: + """Template for identifying a specific type of :class:`iris.cube.Cube`. + Template for identifying a specific type of :class:`iris.cube.Cube` based on its metadata, coordinates and cell_measures. """ - def __init__(self, cube): - """ - Represents the cube metadata and associated coordinate metadata that - allows suitable cubes for concatenation to be identified. - Args: + def __init__(self, cube: iris.cube.Cube) -> None: + """Represent the cube metadata and associated coordinate metadata. - * cube: + Parameters + ---------- + cube : :class:`iris.cube.Cube` The :class:`iris.cube.Cube` source-cube. """ @@ -321,7 +671,12 @@ def __init__(self, cube): self.dim_metadata = [] self.ndim = cube.ndim self.scalar_coords = [] - self.cell_measures_and_dims = cube._cell_measures_and_dims + self.cell_measures_and_dims = [] + self.cm_metadata = [] + self.ancillary_variables_and_dims = [] + self.av_metadata = [] + self.derived_coords_and_dims = [] + self.derived_metadata = [] self.dim_mapping = [] # Determine whether there are any anonymous cube dimensions. @@ -330,14 +685,14 @@ def __init__(self, cube): self.defn = cube.metadata self.data_type = cube.dtype + self.src_cube = cube # # Collate the dimension coordinate metadata. # - for ind, coord in enumerate(self.dim_coords): - dims = cube.coord_dims(coord) - metadata = _CoordMetaData(coord, dims) - self.dim_metadata.append(metadata) + for dim_coord in self.dim_coords: + dims = cube.coord_dims(dim_coord) + self.dim_metadata.append(_CoordMetaData(dim_coord, dims)) self.dim_mapping.append(dims[0]) # @@ -346,130 +701,168 @@ def __init__(self, cube): axes = dict(T=0, Z=1, Y=2, X=3) # Coordinate sort function - by guessed coordinate axis, then - # by coordinate definition, then by dimensions, in ascending order. + # by coordinate name, then by dimensions, in ascending order. 
def key_func(coord): - return (axes.get(guess_coord_axis(coord), len(axes) + 1), - coord._as_defn(), - cube.coord_dims(coord)) - - for coord in sorted(cube.aux_coords, key=key_func): - dims = cube.coord_dims(coord) + return ( + axes.get(guess_coord_axis(coord), len(axes) + 1), + coord.name(), + cube.coord_dims(coord), + ) + + for aux_coord in sorted(cube.aux_coords, key=key_func): + dims = cube.coord_dims(aux_coord) if dims: - metadata = _CoordMetaData(coord, dims) - self.aux_metadata.append(metadata) - coord_and_dims = _CoordAndDims(coord, tuple(dims)) - self.aux_coords_and_dims.append(coord_and_dims) + self.aux_metadata.append(_CoordMetaData(aux_coord, dims)) + self.aux_coords_and_dims.append(_CoordAndDims(aux_coord, tuple(dims))) else: - self.scalar_coords.append(coord) + self.scalar_coords.append(aux_coord) + + def meta_key_func(dm): + return (dm.metadata, dm.cube_dims(cube)) + + for cm in sorted(cube.cell_measures(), key=meta_key_func): + dims = cube.cell_measure_dims(cm) + self.cm_metadata.append(_OtherMetaData(cm, dims)) + self.cell_measures_and_dims.append(_CoordAndDims(cm, tuple(dims))) + + for av in sorted(cube.ancillary_variables(), key=meta_key_func): + dims = cube.ancillary_variable_dims(av) + self.av_metadata.append(_OtherMetaData(av, dims)) + self.ancillary_variables_and_dims.append(_CoordAndDims(av, tuple(dims))) + + def name_key_func(factory): + return factory.name() + + for factory in sorted(cube.aux_factories, key=name_key_func): + coord = factory.make_coord(cube.coord_dims) + dims = factory.derived_dims(cube.coord_dims) + self.derived_metadata.append(_CoordMetaData(coord, dims)) + self.derived_coords_and_dims.append( + _DerivedCoordAndDims(coord, tuple(dims), factory) + ) + + def _coordinate_differences(self, other, attr, reason="metadata"): + """Determine the names of the coordinates that differ. - def _coordinate_differences(self, other, attr): - """ Determine the names of the coordinates that differ between `self` and `other` for a coordinate attribute on a _CubeSignature. - Args: - - * other (_CubeSignature): + Parameters + ---------- + other : _CubeSignature The _CubeSignature to compare against. - - * attr (string): + attr : str The _CubeSignature attribute within which differences exist between `self` and `other`. - - Returns: - Tuple of a descriptive error message and the names of coordinates + reason : str, default="metadata" + The reason to give for mismatch (function is normally, but not + always, testing metadata). + + Returns + ------- + tuple + Tuple of a descriptive error message and the names of attributes that differ between `self` and `other`. """ - # Set up {name: coord_metadata} dictionaries. + # Set up {name: attribute} dictionaries. self_dict = {x.name(): x for x in getattr(self, attr)} other_dict = {x.name(): x for x in getattr(other, attr)} if len(self_dict) == 0: - self_dict = {'< None >': None} + self_dict = {"< None >": None} if len(other_dict) == 0: - other_dict = {'< None >': None} + other_dict = {"< None >": None} self_names = sorted(self_dict.keys()) other_names = sorted(other_dict.keys()) - # Compare coord metadata. + # Compare coord attributes. 
if len(self_names) != len(other_names) or self_names != other_names: - result = ('', ', '.join(self_names), ', '.join(other_names)) + result = ("", ", ".join(self_names), ", ".join(other_names)) else: diff_names = [] - for self_key, self_value in six.iteritems(self_dict): + for self_key, self_value in self_dict.items(): other_value = other_dict[self_key] if self_value != other_value: diff_names.append(self_key) - result = (' metadata', - ', '.join(diff_names), - ', '.join(diff_names)) + result = ( + " " + reason, + ", ".join(diff_names), + ", ".join(diff_names), + ) return result def match(self, other, error_on_mismatch): - """ - Return whether this _CubeSignature equals another. + """Return whether this _CubeSignature equals another. This is the first step to determine if two "cubes" (either a real Cube or a ProtoCube) can be concatenated, by considering: - - data dimensions - - dimensions metadata - - aux coords metadata - - scalar coords - - attributes - - dtype - Args: + * data dimensions + * aux coords metadata + * scalar coords + * attributes + * dtype - * other (_CubeSignature): + Parameters + ---------- + other : _CubeSignature The _CubeSignature to compare against. - - * error_on_mismatch (bool): + error_on_mismatch : bool If True, raise a :class:`~iris.exceptions.ConcatenateError` with a detailed explanation if the two do not match. - Returns: - Boolean. True if and only if this _CubeSignature matches the other. + Returns + ------- + bool + True if and only if this _CubeSignature matches the other. """ - msg_template = '{}{} differ: {} != {}' + msg_template = "{}{} differ: {} != {}" msgs = [] # Check cube definitions. if self.defn != other.defn: # Note that the case of different phenomenon names is dealt # with in :meth:`iris.cube.CubeList.concatenate_cube()`. - msg = 'Cube metadata differs for phenomenon: {}' + msg = "Cube metadata differs for phenomenon: {}" msgs.append(msg.format(self.defn.name())) # Check dim coordinates. if self.dim_metadata != other.dim_metadata: - differences = self._coordinate_differences(other, 'dim_metadata') - msgs.append(msg_template.format('Dimension coordinates', - *differences)) + differences = self._coordinate_differences(other, "dim_metadata") + msgs.append(msg_template.format("Dimension coordinates", *differences)) # Check aux coordinates. if self.aux_metadata != other.aux_metadata: - differences = self._coordinate_differences(other, 'aux_metadata') - msgs.append(msg_template.format('Auxiliary coordinates', - *differences)) + differences = self._coordinate_differences(other, "aux_metadata") + msgs.append(msg_template.format("Auxiliary coordinates", *differences)) + # Check cell measures. + if self.cm_metadata != other.cm_metadata: + differences = self._coordinate_differences(other, "cm_metadata") + msgs.append(msg_template.format("Cell measures", *differences)) + # Check ancillary variables. + if self.av_metadata != other.av_metadata: + differences = self._coordinate_differences(other, "av_metadata") + msgs.append(msg_template.format("Ancillary variables", *differences)) + # Check derived coordinates. + if self.derived_metadata != other.derived_metadata: + differences = self._coordinate_differences(other, "derived_metadata") + msgs.append(msg_template.format("Derived coordinates", *differences)) # Check scalar coordinates. 
if self.scalar_coords != other.scalar_coords: - differences = self._coordinate_differences(other, 'scalar_coords') - msgs.append(msg_template.format('Scalar coordinates', - *differences)) + differences = self._coordinate_differences( + other, "scalar_coords", reason="values or metadata" + ) + msgs.append(msg_template.format("Scalar coordinates", *differences)) # Check ndim. if self.ndim != other.ndim: - msgs.append(msg_template.format('Data dimensions', '', - self.ndim, other.ndim)) + msgs.append( + msg_template.format("Data dimensions", "", self.ndim, other.ndim) + ) # Check data type. if self.data_type != other.data_type: - msgs.append(msg_template.format('Data types', '', - self.data_type, other.data_type)) - - # Check _cell_measures_and_dims - if self.cell_measures_and_dims != other.cell_measures_and_dims: - msgs.append(msg_template.format('CellMeasures', '', - self.cell_measures_and_dims, - other.cell_measures_and_dims)) + msgs.append( + msg_template.format("Data types", "", self.data_type, other.data_type) + ) match = not bool(msgs) if error_on_mismatch and not match: @@ -477,40 +870,46 @@ def match(self, other, error_on_mismatch): return match -class _CoordSignature(object): - """ - Template for identifying a specific type of :class:`iris.cube.Cube` based - on its coordinates. +class _CoordSignature: + """Template for identifying a specific type of :class:`iris.cube.Cube` based on its coordinates.""" - """ - def __init__(self, cube_signature): - """ - Represents the coordinate metadata required to identify suitable + def __init__(self, cube_signature: _CubeSignature) -> None: + """Represent the coordinate metadata. + + Represent the coordinate metadata required to identify suitable non-overlapping :class:`iris.cube.Cube` source-cubes for concatenation over a common single dimension. - Args: - - * cube_signature: + Parameters + ---------- + cube_signature : :class:`_CubeSignature` The :class:`_CubeSignature` that defines the source-cube. """ self.aux_coords_and_dims = cube_signature.aux_coords_and_dims + self.cell_measures_and_dims = cube_signature.cell_measures_and_dims + self.ancillary_variables_and_dims = cube_signature.ancillary_variables_and_dims + self.derived_coords_and_dims = cube_signature.derived_coords_and_dims self.dim_coords = cube_signature.dim_coords self.dim_mapping = cube_signature.dim_mapping - self.dim_extents = [] - self.dim_order = [metadata.kwargs['order'] - for metadata in cube_signature.dim_metadata] + self.dim_extents: list[_CoordExtent] = [] + self.dim_order = [ + metadata.kwargs["order"] for metadata in cube_signature.dim_metadata + ] # Calculate the extents for each dimensional coordinate. self._calculate_extents() @staticmethod - def _cmp(coord, other): - """ - Compare the coordinates for concatenation compatibility. - - Returns: + def _cmp( + coord: iris.coords.DimCoord, + other: iris.coords.DimCoord, + ) -> tuple[bool, bool]: + """Compare the coordinates for concatenation compatibility. + + Returns + ------- + bool tuple A boolean tuple pair of whether the coordinates are compatible, and whether they represent a candidate axis of concatenation. @@ -518,35 +917,29 @@ def _cmp(coord, other): # A candidate axis must have non-identical coordinate points. candidate_axis = not array_equal(coord.points, other.points) - if candidate_axis: - # Ensure both have equal availability of bounds. - result = (coord.bounds is None) == (other.bounds is None) - else: - if coord.bounds is not None and other.bounds is not None: - # Ensure equality of bounds. 
- result = array_equal(coord.bounds, other.bounds) - else: - # Ensure both have equal availability of bounds. - result = coord.bounds is None and other.bounds is None + # Ensure both have equal availability of bounds. + result = coord.has_bounds() == other.has_bounds() + if result and not candidate_axis: + # Ensure equality of bounds. + result = array_equal(coord.bounds, other.bounds) return result, candidate_axis - def candidate_axis(self, other): - """ - Determine the candidate axis of concatenation with the - given coordinate signature. + def candidate_axis(self, other: "_CoordSignature") -> int | None: + """Determine the candidate axis of concatenation with the given coordinate signature. If a candidate axis is found, then the coordinate signatures are compatible. - Args: - - * other: - The :class:`_CoordSignature` + Parameters + ---------- + other : :class:`_CoordSignature` - Returns: - None if no single candidate axis exists, otherwise - the candidate axis of concatenation. + Returns + ------- + result : + None if no single candidate axis exists, otherwise the candidate + axis of concatenation. """ result = False @@ -564,61 +957,57 @@ def candidate_axis(self, other): # Only permit one degree of dimensional freedom when # determining the candidate axis of concatenation. if result and len(candidate_axes) == 1: - result = candidate_axes[0] + axis = candidate_axes[0] else: - result = None + axis = None - return result - - def _calculate_extents(self): - """ - Calculate the extent over each dimension coordinates points and bounds. + return axis - """ + def _calculate_extents(self) -> None: + """Calculate the extent over each dimension coordinates points and bounds.""" self.dim_extents = [] for coord, order in zip(self.dim_coords, self.dim_order): if order == _CONSTANT or order == _INCREASING: points = _Extent(coord.points[0], coord.points[-1]) if coord.bounds is not None: - bounds = (_Extent(coord.bounds[0, 0], coord.bounds[-1, 0]), - _Extent(coord.bounds[0, 1], coord.bounds[-1, 1])) + bounds = ( + _Extent(coord.bounds[0, 0], coord.bounds[-1, 0]), + _Extent(coord.bounds[0, 1], coord.bounds[-1, 1]), + ) else: bounds = None else: # The order must be decreasing ... points = _Extent(coord.points[-1], coord.points[0]) if coord.bounds is not None: - bounds = (_Extent(coord.bounds[-1, 0], coord.bounds[0, 0]), - _Extent(coord.bounds[-1, 1], coord.bounds[0, 1])) + bounds = ( + _Extent(coord.bounds[-1, 0], coord.bounds[0, 0]), + _Extent(coord.bounds[-1, 1], coord.bounds[0, 1]), + ) else: bounds = None self.dim_extents.append(_CoordExtent(points, bounds)) -class _ProtoCube(object): - """ - Framework for concatenating multiple source-cubes over one - common dimension. - - """ - def __init__(self, cube): - """ - Create a new _ProtoCube from the given cube and record the cube - as a source-cube. +class _ProtoCube: + """Framework for concatenating multiple source-cubes over one common dimension.""" - Args: + def __init__(self, cube_signature): + """Create a new _ProtoCube from the given cube and record the cube as a source-cube. - * cube: - Source :class:`iris.cube.Cube` of the :class:`_ProtoCube`. + Parameters + ---------- + cube_signature : + Source :class:`_CubeSignature` of the :class:`_ProtoCube`. """ # Cache the source-cube of this proto-cube. - self._cube = cube + self._cube = cube_signature.src_cube # The cube signature is a combination of cube and coordinate # metadata that defines this proto-cube. 
- self._cube_signature = _CubeSignature(cube) + self._cube_signature = cube_signature # The coordinate signature allows suitable non-overlapping # source-cubes to be identified. @@ -626,7 +1015,7 @@ def __init__(self, cube): # The list of source-cubes relevant to this proto-cube. self._skeletons = [] - self._add_skeleton(self._coord_signature, cube.lazy_data()) + self._add_skeleton(self._coord_signature, self._cube.lazy_data()) # The nominated axis of concatenation. self._axis = None @@ -634,15 +1023,23 @@ def __init__(self, cube): @property def axis(self): """Return the nominated dimension of concatenation.""" - return self._axis + @property + def name(self) -> str | None: + """Return the standard_name or long name.""" + metadata = self._cube_signature.defn + return metadata.standard_name or metadata.long_name + def concatenate(self): - """ - Concatenates all the source-cubes registered with the + """Concatenate all the source-cubes registered with the :class:`_ProtoCube`. + + Concatenate all the source-cubes registered with the :class:`_ProtoCube` over the nominated common dimension. - Returns: + Returns + ------- + :class:`iris.cube.Cube` The concatenated :class:`iris.cube.Cube`. """ @@ -654,27 +1051,49 @@ def concatenate(self): # Sequence the skeleton segments into the correct order # pending concatenation. - skeletons.sort(key=lambda skeleton: skeleton.signature.dim_extents, - reverse=(order == _DECREASING)) + skeletons.sort( + key=lambda skeleton: skeleton.signature.dim_extents, + reverse=(order == _DECREASING), + ) # Concatenate the new dimension coordinate. dim_coords_and_dims = self._build_dim_coordinates() - # Concatenate the new auxiliary coordinates. + # Concatenate the new auxiliary coordinates (does NOT include + # scalar coordinates!). aux_coords_and_dims = self._build_aux_coordinates() + # Concatenate the new scalar coordinates. + scalar_coords = self._build_scalar_coordinates() + + # Concatenate the new cell measures + cell_measures_and_dims = self._build_cell_measures() + + # Concatenate the new ancillary variables + ancillary_variables_and_dims = self._build_ancillary_variables() + + # Concatenate the new aux factories + aux_factories = self._build_aux_factories( + dim_coords_and_dims, aux_coords_and_dims, scalar_coords + ) + # Concatenate the new data payload. data = self._build_data() # Build the new cube. + all_aux_coords_and_dims = aux_coords_and_dims + [ + (scalar_coord, ()) for scalar_coord in scalar_coords + ] kwargs = cube_signature.defn._asdict() - new_cm_and_dims = [(deepcopy(cm), dims) for cm, dims - in self._cube._cell_measures_and_dims] - cube = iris.cube.Cube(data, - dim_coords_and_dims=dim_coords_and_dims, - aux_coords_and_dims=aux_coords_and_dims, - cell_measures_and_dims=new_cm_and_dims, - **kwargs) + cube = iris.cube.Cube( + data, + dim_coords_and_dims=dim_coords_and_dims, + aux_coords_and_dims=all_aux_coords_and_dims, + cell_measures_and_dims=cell_measures_and_dims, + ancillary_variables_and_dims=ancillary_variables_and_dims, + aux_factories=aux_factories, + **kwargs, + ) else: # There are no other source-cubes to concatenate # with this proto-cube. 
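The final data payload assembled by the method above is built lazily: conceptually, the data-building step reduces to stacking the skeletons' lazy arrays along the negotiated axis. A toy sketch of that idea (the helper name is illustrative, not the real ``_ProtoCube`` method)::

    import dask.array as da

    def build_data(skeleton_payloads, axis):
        # Stack the per-cube lazy payloads without forcing computation.
        return da.concatenate(skeleton_payloads, axis=axis)

    parts = [da.zeros((2, 3), chunks=(1, 3)) for _ in range(3)]
    data = build_data(parts, axis=0)
    print(data.shape)  # (6, 3) -- still lazy until .compute()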
@@ -682,74 +1101,165 @@ def concatenate(self): return cube - def register(self, cube, axis=None, error_on_mismatch=False, - check_aux_coords=False): - """ - Determine whether the given source-cube is suitable for concatenation + def register( + self, + cube_signature: _CubeSignature, + hashes: Mapping[str, _ArrayHash], + axis: int | None = None, + error_on_mismatch: bool = False, + check_aux_coords: bool = False, + check_cell_measures: bool = False, + check_ancils: bool = False, + check_derived_coords: bool = False, + ) -> bool: + """Determine if the given source-cube is suitable for concatenation. + + Determine if the given source-cube is suitable for concatenation with this :class:`_ProtoCube`. - Args: - - * cube: - The :class:`iris.cube.Cube` source-cube candidate for + Parameters + ---------- + cube_signature : :class:`_CubeSignature` + The :class:`_CubeSignature` of the source-cube candidate for concatenation. - - Kwargs: - - * axis: + hashes : + A mapping containing hash values for checking coordinate, ancillary + variable, and cell measure equality. + axis : optional Seed the dimension of concatenation for the :class:`_ProtoCube` rather than rely on negotiation with source-cubes. - - * error_on_mismatch: + error_on_mismatch : bool, default=False If True, raise an informative error if registration fails. - - Returns: - Boolean. + check_aux_coords : bool, default=False + Checks if the points and bounds of auxiliary coordinates of the + cubes match. This check is not applied to auxiliary coordinates + that span the dimension the concatenation is occurring along. + Defaults to False. + check_cell_measures : bool, default=False + Checks if the data of cell measures of the cubes match. This check + is not applied to cell measures that span the dimension the + concatenation is occurring along. Defaults to False. + check_ancils : bool, default=False + Checks if the data of ancillary variables of the cubes match. This + check is not applied to ancillary variables that span the dimension + the concatenation is occurring along. Defaults to False. + check_derived_coords : bool, default=False + Checks if the points and bounds of derived coordinates of the cubes + match. This check is not applied to derived coordinates that span + the dimension the concatenation is occurring along. Note that + differences in scalar coordinates and dimensional coordinates used + to derive the coordinate are still checked. Checks for auxiliary + coordinates used to derive the coordinates can be ignored with + `check_aux_coords`. Defaults to False. + + Returns + ------- + bool """ # Verify and assert the nominated axis. if axis is not None and self.axis is not None and self.axis != axis: - msg = 'Nominated axis [{}] is not equal ' \ - 'to negotiated axis [{}]'.format(axis, self.axis) + msg = "Nominated axis [{}] is not equal to negotiated axis [{}]".format( + axis, self.axis + ) raise ValueError(msg) # Check for compatible cube signatures. - cube_signature = _CubeSignature(cube) match = self._cube_signature.match(cube_signature, error_on_mismatch) + mismatch_error_msg = None # Check for compatible coordinate signatures. 
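The four check_* flags documented above surface through the public concatenation API. A hedged usage sketch, assuming the corresponding keywords exist on iris.cube.CubeList.concatenate in this version of Iris (the file pattern is hypothetical):

import iris

cubes = iris.load("time_slice_*.nc")  # hypothetical input files
result = cubes.concatenate(
    check_aux_coords=True,      # compare aux-coord points and bounds
    check_cell_measures=True,   # compare cell-measure data
    check_ancils=True,          # compare ancillary-variable data
    check_derived_coords=True,  # compare derived-coord points and bounds
)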
if match: coord_signature = _CoordSignature(cube_signature) - candidate_axis = self._coord_signature.candidate_axis( - coord_signature) - match = candidate_axis is not None and \ - (candidate_axis == axis or axis is None) + candidate_axis = self._coord_signature.candidate_axis(coord_signature) + match = candidate_axis is not None and ( + candidate_axis == axis or axis is None + ) + if not match: + mismatch_error_msg = ( + f"Cannot find an axis to concatenate over for phenomenon " + f"`{self._cube.name()}`" + ) # Check for compatible coordinate extents. if match: dim_ind = self._coord_signature.dim_mapping.index(candidate_axis) - match = self._sequence(coord_signature.dim_extents[dim_ind], - candidate_axis) + match = self._sequence(coord_signature.dim_extents[dim_ind], candidate_axis) + if error_on_mismatch and not match: + mismatch_error_msg = f"Found cubes with overlap on concatenate axis {candidate_axis}, cannot concatenate overlapping cubes" + elif not match: + mismatch_error_msg = f"Found cubes with overlap on concatenate axis {candidate_axis}, skipping concatenation for these cubes" + + def get_hashes( + coord: DimCoord | AuxCoord | AncillaryVariable | CellMeasure, + ) -> tuple[_ArrayHash, ...]: + array_id = _array_id(coord, bound=False) + result = [hashes[array_id]] + if isinstance(coord, (DimCoord, AuxCoord)) and coord.has_bounds(): + bound_array_id = _array_id(coord, bound=True) + result.append(hashes[bound_array_id]) + return tuple(result) + + # Mapping from `_CubeSignature` attributes to human readable names. + coord_type_names = { + "aux_coords_and_dims": "Auxiliary coordinates", + "cell_measures_and_dims": "Cell measures", + "ancillary_variables_and_dims": "Ancillary variables", + "derived_coords_and_dims": "Derived coordinates", + } + + def check_coord_match(coord_type: str) -> tuple[bool, str]: + result = (True, "") + for coord_a, coord_b in zip( + getattr(self._cube_signature, coord_type), + getattr(cube_signature, coord_type), + ): + # Coordinates that span the candidate axis can differ + if ( + candidate_axis not in coord_a.dims + or candidate_axis not in coord_b.dims + ): + if not get_hashes(coord_a.coord) == get_hashes(coord_b.coord): + mismatch_error_msg = ( + f"{coord_type_names[coord_type]} are unequal for phenomenon" + f" `{self._cube.name()}`:\n" + f"a: {coord_a}\n" + f"b: {coord_b}" + ) + result = (False, mismatch_error_msg) + break + + return result # Check for compatible AuxCoords. - if match: - if check_aux_coords: - for coord_a, coord_b in zip( - self._cube_signature.aux_coords_and_dims, - cube_signature.aux_coords_and_dims): - # AuxCoords that span the candidate axis can difffer - if (candidate_axis not in coord_a.dims or - candidate_axis not in coord_b.dims): - if not coord_a == coord_b: - match = False + if match and check_aux_coords: + match, msg = check_coord_match("aux_coords_and_dims") + if not match: + mismatch_error_msg = msg + + # Check for compatible CellMeasures. + if match and check_cell_measures: + match, msg = check_coord_match("cell_measures_and_dims") + if not match: + mismatch_error_msg = msg + + # Check for compatible AncillaryVariables. + if match and check_ancils: + match, msg = check_coord_match("ancillary_variables_and_dims") + if not match: + mismatch_error_msg = msg + + # Check for compatible derived coordinates. + if match and check_derived_coords: + match, msg = check_coord_match("derived_coords_and_dims") + if not match: + mismatch_error_msg = msg if match: # Register the cube as a source-cube for this proto-cube. 
- self._add_skeleton(coord_signature, cube.lazy_data()) + self._add_skeleton(coord_signature, cube_signature.src_cube.lazy_data()) # Declare the nominated axis of concatenation. self._axis = candidate_axis - - if match: # If the protocube dimension order is constant (indicating it was # created from a cube with a length 1 dimension coordinate) but # a subsequently registered cube has a non-constant dimension @@ -761,20 +1271,26 @@ def register(self, cube, axis=None, error_on_mismatch=False, if existing_order == _CONSTANT and this_order != _CONSTANT: self._coord_signature.dim_order[dim_ind] = this_order + if mismatch_error_msg and not match: + if error_on_mismatch: + raise iris.exceptions.ConcatenateError([mismatch_error_msg]) + else: + warnings.warn( + mismatch_error_msg, category=iris.warnings.IrisUserWarning + ) + return match def _add_skeleton(self, coord_signature, data): - """ - Create and add the source-cube skeleton to the - :class:`_ProtoCube`. + """Create and add the source-cube skeleton to the :class:`_ProtoCube`. - Args: - - * coord_signature: + Parameters + ---------- + coord_signature : :class:`_CoordSignature` The :class:`_CoordSignature` of the given source-cube. - * data: + data : The data payload of an associated :class:`iris.cube.Cube` source-cube. @@ -783,12 +1299,14 @@ def _add_skeleton(self, coord_signature, data): self._skeletons.append(skeleton) def _build_aux_coordinates(self): - """ + """Generate the auxiliary coordinates with associated dimension(s) mapping. + Generate the auxiliary coordinates with associated dimension(s) mapping for the new concatenated cube. - Returns: - A list of auxiliary coordinates and dimension(s) tuple pairs. + Returns + ------- + A list of auxiliary coordinates and dimension(s) tuple pairs. """ # Setup convenience hooks. @@ -804,15 +1322,19 @@ def _build_aux_coordinates(self): if self.axis in dims: # Concatenate the points together. dim = dims.index(self.axis) - points = [skton.signature.aux_coords_and_dims[i].coord.points - for skton in skeletons] + points = [ + skton.signature.aux_coords_and_dims[i].coord.core_points() + for skton in skeletons + ] points = np.concatenate(tuple(points), axis=dim) # Concatenate the bounds together. bnds = None if coord.has_bounds(): - bnds = [skton.signature.aux_coords_and_dims[i].coord.bounds - for skton in skeletons] + bnds = [ + skton.signature.aux_coords_and_dims[i].coord.core_bounds() + for skton in skeletons + ] bnds = np.concatenate(tuple(bnds), axis=dim) # Generate the associated coordinate metadata. @@ -825,42 +1347,205 @@ def _build_aux_coordinates(self): # Attempt to create a DimCoord, otherwise default to # an AuxCoord on failure. try: - coord = iris.coords.DimCoord(points, bounds=bnds, - **kwargs) + coord = iris.coords.DimCoord(points, bounds=bnds, **kwargs) except ValueError: - coord = iris.coords.AuxCoord(points, bounds=bnds, - **kwargs) + # Ensure to remove the "circular" kwarg, which may be + # present in the defn of a DimCoord being demoted. + _ = kwargs.pop("circular", None) + coord = iris.coords.AuxCoord(points, bounds=bnds, **kwargs) aux_coords_and_dims.append((coord.copy(), dims)) - # Generate all the scalar coordinates for the new concatenated cube. - for coord in cube_signature.scalar_coords: - aux_coords_and_dims.append((coord.copy(), ())) - return aux_coords_and_dims - def _build_data(self): + def _build_scalar_coordinates(self): + """Generate the scalar coordinates for the new concatenated cube. + + Returns + ------- + A list of scalar coordinates.
+ + """ + scalar_coords = [] + for coord in self._cube_signature.scalar_coords: + scalar_coords.append(coord.copy()) + + return scalar_coords + + def _build_cell_measures(self): + """Generate the cell measures with associated dimension(s) mapping. + + Generate the cell measures with associated dimension(s) + mapping for the new concatenated cube. + + Returns + ------- + A list of cell measures and dimension(s) tuple pairs. + + """ + # Setup convenience hooks. + skeletons = self._skeletons + cube_signature = self._cube_signature + + cell_measures_and_dims = [] + + # Generate all the cell measures for the new concatenated cube. + for i, (cm, dims) in enumerate(cube_signature.cell_measures_and_dims): + # Check whether the cell measure spans the nominated + # dimension of concatenation. + if self.axis in dims: + # Concatenate the data together. + dim = dims.index(self.axis) + data = [ + skton.signature.cell_measures_and_dims[i].coord.core_data() + for skton in skeletons + ] + data = concatenate_arrays(tuple(data), axis=dim) + + # Generate the associated metadata. + kwargs = cube_signature.cm_metadata[i].defn._asdict() + + # Build the concatenated coordinate. + cm = iris.coords.CellMeasure(data, **kwargs) + + cell_measures_and_dims.append((cm.copy(), dims)) + + return cell_measures_and_dims + + def _build_ancillary_variables(self): + """Generate the ancillary variables with associated dimension(s) mapping. + + Generate the ancillary variables with associated dimension(s) + mapping for the new concatenated cube. + + Returns + ------- + A list of ancillary variables and dimension(s) tuple pairs. + + """ + # Setup convenience hooks. + skeletons = self._skeletons + cube_signature = self._cube_signature + + ancillary_variables_and_dims = [] + + # Generate all the ancillary variables for the new concatenated cube. + for i, (av, dims) in enumerate(cube_signature.ancillary_variables_and_dims): + # Check whether the ancillary variable spans the nominated + # dimension of concatenation. + if self.axis in dims: + # Concatenate the data together. + dim = dims.index(self.axis) + data = [ + skton.signature.ancillary_variables_and_dims[i].coord.core_data() + for skton in skeletons + ] + data = concatenate_arrays(tuple(data), axis=dim) + + # Generate the associated metadata. + kwargs = cube_signature.av_metadata[i].defn._asdict() + + # Build the concatenated coordinate. + av = iris.coords.AncillaryVariable(data, **kwargs) + + ancillary_variables_and_dims.append((av.copy(), dims)) + + return ancillary_variables_and_dims + + def _build_aux_factories( + self, dim_coords_and_dims, aux_coords_and_dims, scalar_coords + ): + """Generate the aux factories for the new concatenated cube. + + Parameters + ---------- + dim_coords_and_dims : + A list of dimension coordinate and dimension tuple pairs from the + concatenated cube. + aux_coords_and_dims : + A list of auxiliary coordinates and dimension(s) tuple pairs from + the concatenated cube. + scalar_coords : + A list of scalar coordinates from the concatenated cube. + + Returns + ------- + list of :class:`iris.aux_factory.AuxCoordFactory` + """ - Generate the data payload for the new concatenated cube. + # Setup convenience hooks. 
+ cube_signature = self._cube_signature + old_dim_coords = cube_signature.dim_coords + old_aux_coords = [a[0] for a in cube_signature.aux_coords_and_dims] + new_dim_coords = [d[0] for d in dim_coords_and_dims] + new_aux_coords = [a[0] for a in aux_coords_and_dims] + old_scalar_coords = cube_signature.scalar_coords + new_scalar_coords = scalar_coords + + aux_factories = [] + + # Generate all the factories for the new concatenated cube. + for _, _, factory in cube_signature.derived_coords_and_dims: + # Update the dependencies of the factory with coordinates of + # the concatenated cube. We need to check all coordinate types + # here (dim coords, aux coords, and scalar coords). + + # Note: in contrast to other _build_... methods of this class, we + # do NOT need to distinguish between aux factories that span the + # nominated concatenation axis and aux factories that do not. The + # reason is that ALL aux factories need to be updated with the new + # coordinates of the concatenated cube (passed to this function via + # dim_coords_and_dims, aux_coords_and_dims, scalar_coords [these + # contain ALL new coordinates, not only the ones spanning the + # concatenation dimension]), so no special treatment for the aux + # factories that span the concatenation dimension is necessary. If + # not all aux factories are properly updated with references to the + # new coordinates, this may lead to KeyErrors (see + # https://github.com/SciTools/iris/issues/5339). + new_dependencies = {} + for old_dependency in factory.dependencies.values(): + if old_dependency in old_dim_coords: + dep_idx = old_dim_coords.index(old_dependency) + new_dependency = new_dim_coords[dep_idx] + elif old_dependency in old_aux_coords: + dep_idx = old_aux_coords.index(old_dependency) + new_dependency = new_aux_coords[dep_idx] + else: + dep_idx = old_scalar_coords.index(old_dependency) + new_dependency = new_scalar_coords[dep_idx] + new_dependencies[id(old_dependency)] = new_dependency - Returns: - The concatenated :class:`iris.cube.Cube` data payload. + # Create new factory with the updated dependencies. + factory = factory.updated(new_dependencies) + + aux_factories.append(factory) + + return aux_factories + + def _build_data(self): + """Generate the data payload for the new concatenated cube. + + Returns + ------- + The concatenated :class:`iris.cube.Cube` data payload. """ skeletons = self._skeletons data = [skeleton.data for skeleton in skeletons] - data = da.concatenate(data, self.axis) + data = concatenate_arrays(data, self.axis) return data def _build_dim_coordinates(self): - """ + """Generate the dimension coordinates. + Generate the dimension coordinates with associated dimension mapping for the new concatenated cube. - Return: - A list of dimension coordinate and dimension tuple pairs. + Returns + ------- + A list of dimension coordinate and dimension tuple pairs. """ # Setup convenience hooks. @@ -868,24 +1553,28 @@ def _build_dim_coordinates(self): axis = self.axis dim_ind = self._cube_signature.dim_mapping.index(axis) metadata = self._cube_signature.dim_metadata[dim_ind] - defn, circular = metadata.defn, metadata.kwargs['circular'] + defn, circular = metadata.defn, metadata.kwargs["circular"] # Concatenate the points together for the nominated dimension. 
- points = [skeleton.signature.dim_coords[dim_ind].points - for skeleton in skeletons] + points = [ + skeleton.signature.dim_coords[dim_ind].core_points() + for skeleton in skeletons + ] points = np.concatenate(tuple(points)) # Concatenate the bounds together for the nominated dimension. bounds = None if self._cube_signature.dim_coords[dim_ind].has_bounds(): - bounds = [skeleton.signature.dim_coords[dim_ind].bounds - for skeleton in skeletons] + bounds = [ + skeleton.signature.dim_coords[dim_ind].core_bounds() + for skeleton in skeletons + ] bounds = np.concatenate(tuple(bounds)) # Populate the new dimension coordinate with the concatenated # points, bounds and associated metadata. kwargs = defn._asdict() - kwargs['circular'] = circular + kwargs["circular"] = circular dim_coord = iris.coords.DimCoord(points, bounds=bounds, **kwargs) # Generate all the dimension coordinates for the new concatenated cube. @@ -900,30 +1589,32 @@ def _build_dim_coordinates(self): return dim_coords_and_dims def _sequence(self, extent, axis): - """ + """Determine whether the extent can be sequenced. + Determine whether the given extent can be sequenced along with all the extents of the source-cubes already registered with this :class:`_ProtoCube` into non-overlapping segments for the given axis. - Args: - - * extent: + Parameters + ---------- + extent : :class:`_CoordExtent` The :class:`_CoordExtent` of the candidate source-cube. - - * axis: + axis : The candidate axis of concatenation. - Returns: - Boolean. + Returns + ------- + bool """ result = True # Add the new extent to the current extents collection. dim_ind = self._coord_signature.dim_mapping.index(axis) - dim_extents = [skeleton.signature.dim_extents[dim_ind] - for skeleton in self._skeletons] + dim_extents = [ + skeleton.signature.dim_extents[dim_ind] for skeleton in self._skeletons + ] dim_extents.append(extent) # Sort into the appropriate dimension order. diff --git a/lib/iris/_constraints.py b/lib/iris/_constraints.py index 18b7fb1f54..765a975651 100644 --- a/lib/iris/_constraints.py +++ b/lib/iris/_constraints.py @@ -1,42 +1,20 @@ -# (C) British Crown Copyright 2010 - 2019, Met Office +# Copyright Iris contributors # -# This file is part of Iris. -# -# Iris is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the -# Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Iris is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with Iris. If not, see . -""" -Provides objects for building up expressions useful for pattern matching. - -""" - -from __future__ import (absolute_import, division, print_function) -from six.moves import (filter, input, map, range, zip) # noqa -import six +# This file is part of Iris and is released under the BSD license. +# See LICENSE in the root of the repository for full licensing details. 
+"""Provide objects for building up expressions useful for pattern matching.""" -try: # Python 3 - from collections.abc import Iterable, Mapping -except ImportError: # Python 2.7 - from collections import Iterable, Mapping +from collections.abc import Iterable, Mapping import operator import numpy as np -import iris.coords import iris.exceptions -class Constraint(object): - """ +class Constraint: + """Cubes can be pattern matched and filtered according to specific criteria. + Constraints are the mechanism by which cubes can be pattern matched and filtered according to specific criteria. @@ -44,22 +22,25 @@ class Constraint(object): :meth:`Constraint.extract` method. """ + def __init__(self, name=None, cube_func=None, coord_values=None, **kwargs): - """ + """Use for filtering cube loading or cube list extraction. + Creates a new instance of a Constraint which can be used for filtering cube loading or cube list extraction. - Args: - - * name: string or None - If a string, it is used as the name to match against Cube.name(). - * cube_func: callable or None + Parameters + ---------- + name : str or None, optional + If a string, it is used as the name to match against the + :attr:`iris.cube.Cube.names` property. + cube_func : callable or None, optional If a callable, it must accept a Cube as its first and only argument and return either True or False. - * coord_values: dict or None + coord_values : dict or None, optional If a dict, it must map coordinate name to the condition on the associated coordinate. - * `**kwargs`: + ***kwargs : dict, optional The remaining keyword arguments are converted to coordinate constraints. The name of the argument gives the name of a coordinate, and the value of the argument is the condition to meet @@ -80,6 +61,8 @@ def __init__(self, name=None, cube_func=None, coord_values=None, **kwargs): returning True or False if the value of the Cell is desired. e.g. ``model_level_number=lambda cell: 5 < cell < 10`` + Examples + -------- The :ref:`user guide ` covers cube much of constraining in detail, however an example which uses all of the features of this class is given here for completeness:: @@ -90,26 +73,49 @@ def __init__(self, name=None, cube_func=None, coord_values=None, **kwargs): model_level_number=[10, 12]) & Constraint(ensemble_member=2) + .. note:: + Whilst ``&`` is supported, the ``|`` that might reasonably be expected + is not. This is because each constraint describes a boxlike region, and + thus the intersection of these constraints (obtained with ``&``) will + also describe a boxlike region. Allowing the union of two constraints + (with the ``|`` symbol) would allow the description of a non-boxlike + region. These are difficult to describe with cubes and so it would be + ambiguous what should be extracted. + + To generate multiple cubes, each constrained to a different range of + the same coordinate, use :py:func:`iris.load_cubes` or + :py:func:`iris.cube.CubeList.extract_cubes`. + + A cube can be constrained to multiple ranges within the same coordinate + using something like the following constraint:: + + def latitude_bands(cell): + return (0 < cell < 30) or (60 < cell < 90) + + Constraint(cube_func=latitude_bands) + Constraint filtering is performed at the cell level. For further details on how cell comparisons are performed see :class:`iris.coords.Cell`. 
""" - if not (name is None or isinstance(name, six.string_types)): - raise TypeError('name must be None or string, got %r' % name) + if not (name is None or isinstance(name, str)): + raise TypeError("name must be None or string, got %r" % name) if not (cube_func is None or callable(cube_func)): - raise TypeError('cube_func must be None or callable, got %r' - % cube_func) - if not (coord_values is None or isinstance(coord_values, - Mapping)): - raise TypeError('coord_values must be None or a ' - 'collections.Mapping, got %r' % coord_values) + raise TypeError("cube_func must be None or callable, got %r" % cube_func) + if not (coord_values is None or isinstance(coord_values, Mapping)): + raise TypeError( + "coord_values must be None or a " + "collections.Mapping, got %r" % coord_values + ) coord_values = coord_values or {} duplicate_keys = set(coord_values.keys()) & set(kwargs.keys()) if duplicate_keys: - raise ValueError('Duplicate coordinate conditions specified for: ' - '%s' % list(duplicate_keys)) + raise ValueError( + "Duplicate coordinate conditions specified for: " + "%s" % list(duplicate_keys) + ) self._name = name self._cube_func = cube_func @@ -119,34 +125,61 @@ def __init__(self, name=None, cube_func=None, coord_values=None, **kwargs): self._coord_constraints = [] for coord_name, coord_thing in self._coord_values.items(): - self._coord_constraints.append(_CoordConstraint(coord_name, - coord_thing)) + self._coord_constraints.append(_CoordConstraint(coord_name, coord_thing)) + + def __eq__(self, other): + # Equivalence is defined, but is naturally limited for any Constraints + # based on callables, i.e. "cube_func", or value functions for + # attributes/names/coords : These can only be == if they contain the + # *same* callable object (i.e. same object identity). + eq = ( + isinstance(other, Constraint) + and self._name == other._name + and self._cube_func == other._cube_func + and self._coord_constraints == other._coord_constraints + ) + # NOTE: theoretically, you could compare coord constraints as a *set*, + # as order should not affect matching. + # Not totally sure, so for now let's not. + return eq + + def __hash__(self): + # We want constraints to have hashes, so they can act as e.g. + # dictionary keys or tuple elements. + # So, we *must* provide this, as overloading '__eq__' automatically + # disables it. + # Just use basic object identity. + return id(self) def __repr__(self): args = [] if self._name: - args.append(('name', self._name)) + args.append(("name", self._name)) if self._cube_func: - args.append(('cube_func', self._cube_func)) + args.append(("cube_func", self._cube_func)) if self._coord_values: - args.append(('coord_values', self._coord_values)) - return 'Constraint(%s)' % ', '.join('%s=%r' % (k, v) for k, v in args) + args.append(("coord_values", self._coord_values)) + return "Constraint(%s)" % ", ".join("%s=%r" % (k, v) for k, v in args) def _coordless_match(self, cube): - """ + """Return whether this constraint matches the given cube. + Return whether this constraint matches the given cube when not taking coordinates into account. """ match = True if self._name: - match = self._name == cube.name() + # Require to also check against cube.name() for the fallback + # "unknown" default case, when there is no name metadata available. 
+ match = self._name in cube._names or self._name == cube.name() if match and self._cube_func: match = self._cube_func(cube) return match def extract(self, cube): - """ + """Return the subset of the given cube which matches this constraint. + Return the subset of the given cube which matches this constraint, else return None. @@ -188,63 +221,97 @@ def __rand__(self, other): class ConstraintCombination(Constraint): """Represents the binary combination of two Constraint instances.""" + def __init__(self, lhs, rhs, operator): - """ - A ConstraintCombination instance is created by providing two - Constraint instances and the appropriate :mod:`operator`. + """Instance created by providing two Constraint instances. + + Instance created by providing two Constraint instances and the + appropriate :mod:`operator`. """ try: lhs_constraint = as_constraint(lhs) rhs_constraint = as_constraint(rhs) except TypeError: - raise TypeError('Can only combine Constraint instances, ' - 'got: %s and %s' % (type(lhs), type(rhs))) + raise TypeError( + "Can only combine Constraint instances, " + "got: %s and %s" % (type(lhs), type(rhs)) + ) self.lhs = lhs_constraint self.rhs = rhs_constraint self.operator = operator + def __eq__(self, other): + eq = ( + isinstance(other, ConstraintCombination) + and self.lhs == other.lhs + and self.rhs == other.rhs + and self.operator == other.operator + ) + return eq + + def __hash__(self): + # Must re-define if you overload __eq__ : Use object identity. + return id(self) + def _coordless_match(self, cube): - return self.operator(self.lhs._coordless_match(cube), - self.rhs._coordless_match(cube)) + return self.operator( + self.lhs._coordless_match(cube), self.rhs._coordless_match(cube) + ) def __repr__(self): - return 'ConstraintCombination(%r, %r, %r)' % (self.lhs, self.rhs, - self.operator) + return "ConstraintCombination(%r, %r, %r)" % ( + self.lhs, + self.rhs, + self.operator, + ) def _CIM_extract(self, cube): - return self.operator(self.lhs._CIM_extract(cube), - self.rhs._CIM_extract(cube)) + return self.operator(self.lhs._CIM_extract(cube), self.rhs._CIM_extract(cube)) -class _CoordConstraint(object): +class _CoordConstraint: """Represents the atomic elements which might build up a Constraint.""" + def __init__(self, coord_name, coord_thing): - """ + """Create a coordinate constraint. + Create a coordinate constraint given the coordinate name and a thing to compare it with. - Arguments: - - * coord_name - string - The name of the coordinate to constrain - * coord_thing - The object to compare + Parameters + ---------- + coord_name : str + The name of the coordinate to constrain. + coord_thing : + The object to compare. """ self.coord_name = coord_name self._coord_thing = coord_thing def __repr__(self): - return '_CoordConstraint(%r, %r)' % (self.coord_name, - self._coord_thing) + return "_CoordConstraint(%r, %r)" % ( + self.coord_name, + self._coord_thing, + ) + + def __eq__(self, other): + eq = ( + isinstance(other, _CoordConstraint) + and self.coord_name == other.coord_name + and self._coord_thing == other._coord_thing + ) + return eq + + def __hash__(self): + # Must re-define if you overload __eq__ : Use object identity. + return id(self) def extract(self, cube): - """ - Returns the the column based indices of the given cube which - match the constraint. + """Return the column based indices of the cube which match the constraint.""" + from iris.coords import Cell, DimCoord - """ # Cater for scalar cubes by setting the dimensionality to 1 # when cube.ndim is 0. 
cube_cim = _ColumnIndexManager(cube.ndim or 1) @@ -255,29 +322,35 @@ def extract(self, cube): return cube_cim dims = cube.coord_dims(coord) if len(dims) > 1: - msg = 'Cannot apply constraints to multidimensional coordinates' + msg = "Cannot apply constraints to multidimensional coordinates" raise iris.exceptions.CoordinateMultiDimError(msg) try_quick = False if callable(self._coord_thing): call_func = self._coord_thing - elif (isinstance(self._coord_thing, Iterable) and - not isinstance(self._coord_thing, - (six.string_types, iris.coords.Cell))): + elif isinstance(self._coord_thing, Iterable) and not isinstance( + self._coord_thing, (str, Cell) + ): desired_values = list(self._coord_thing) # A dramatic speedup can be had if we don't have bounds. if coord.has_bounds(): + def call_func(cell): return cell in desired_values + else: + def call_func(cell): return cell.point in desired_values + else: + def call_func(c): return c == self._coord_thing - try_quick = (isinstance(coord, iris.coords.DimCoord) and - not isinstance(self._coord_thing, iris.coords.Cell)) + try_quick = isinstance(coord, DimCoord) and not isinstance( + self._coord_thing, Cell + ) # Simple, yet dramatic, optimisation for the monotonic case. if try_quick: @@ -286,7 +359,7 @@ def call_func(c): except TypeError: try_quick = False if try_quick: - r = np.zeros(coord.shape, dtype=np.bool) + r = np.zeros(coord.shape, dtype=np.bool_) if coord.cell(i) == self._coord_thing: r[i] = True else: @@ -298,10 +371,11 @@ def call_func(c): return cube_cim -class _ColumnIndexManager(object): - """ - A class to represent column aligned slices which can be operated on - using ``&``, ``|`` or ``^``. +class _ColumnIndexManager: + """Represent column aligned slices which can be operated on. + + Represent column aligned slices which can be operated on using + ``&``, ``|`` or ``^``. :: @@ -312,12 +386,9 @@ class _ColumnIndexManager(object): print(cim.as_slice()) """ - def __init__(self, ndims): - """ - A _ColumnIndexManager is always created to span the given - number of dimensions. - """ + def __init__(self, ndims): + """_ColumnIndexManager always created to span the given number of dimensions.""" self._column_arrays = [True] * ndims self.ndims = ndims @@ -335,8 +406,10 @@ def _bitwise_operator(self, other, operator): return NotImplemented if self.ndims != other.ndims: - raise ValueError('Cannot do %s for %r and %r as they have a ' - 'different number of dimensions.' % operator) + raise ValueError( + "Cannot do %s for %r and %r as they have a " + "different number of dimensions." % operator + ) r = _ColumnIndexManager(self.ndims) # iterate over each dimension and combine appropriately for i, (lhs, rhs) in enumerate(zip(self, other)): @@ -356,12 +429,15 @@ def __setitem__(self, key, value): if is_vector or isinstance(value, bool): self._column_arrays[key] = value else: - raise TypeError('Expecting value to be a 1 dimensional numpy array' - ', or a boolean. Got %s' % (type(value))) + raise TypeError( + "Expecting value to be a 1 dimensional numpy array" + ", or a boolean. Got %s" % (type(value)) + ) def as_slice(self): - """ - Turns a _ColumnIndexManager into a tuple which can be used in an + """Turn a _ColumnIndexManager into a tuple. + + Turn a _ColumnIndexManager into a tuple which can be used in an indexing operation. If no index is possible, None will be returned.
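A sketch of the slice-recovery rule used by as_slice below: a boolean column collapses to a slice only when its True positions are evenly spaced, otherwise it falls back to a tuple for fancy indexing (the helper name is hypothetical):

import numpy as np

def column_to_key(column):
    where_true = np.where(column)[0]
    if where_true.size == 0:
        return None  # no index possible
    delta = np.diff(where_true)
    if delta.size == 0 or np.all(delta[0] == delta):
        step = delta[0] if delta.size else 1
        return slice(where_true[0], where_true[-1] + 1, step)
    return tuple(where_true)  # irregular spacing: fancy indexing

print(column_to_key(np.array([False, True, False, True, False, True])))
# slice(1, 6, 2)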
@@ -389,8 +465,7 @@ def as_slice(self): delta = np.diff(where_true, axis=0) # if the diff is consistent we can create a slice object if all(delta[0] == delta): - result[dim] = slice(where_true[0], where_true[-1] + 1, - delta[0]) + result[dim] = slice(where_true[0], where_true[-1] + 1, delta[0]) else: # otherwise, key is a tuple result[dim] = tuple(where_true) @@ -409,20 +484,17 @@ def as_slice(self): def list_of_constraints(constraints): - """ - Turns the given constraints into a list of valid constraints - using :func:`as_constraint`. - - """ - if not isinstance(constraints, (list, tuple)): + """Turn constraints into list of valid constraints using :func:`as_constraint`.""" + if isinstance(constraints, str) or not isinstance(constraints, Iterable): constraints = [constraints] return [as_constraint(constraint) for constraint in constraints] -def as_constraint(thing): - """ - Casts an object into a cube constraint where possible, otherwise +def as_constraint(thing: Constraint | str | None) -> Constraint: + """Cast an object into a cube constraint where possible. + + Cast an object into a cube constraint where possible, otherwise a TypeError will be raised. If the given object is already a valid constraint then the given object @@ -433,16 +505,18 @@ def as_constraint(thing): return thing elif thing is None: return Constraint() - elif isinstance(thing, six.string_types): + elif isinstance(thing, str): return Constraint(thing) else: - raise TypeError('%r cannot be cast to a constraint.' % thing) + raise TypeError("%r cannot be cast to a constraint." % thing) class AttributeConstraint(Constraint): """Provides a simple Cube-attribute based :class:`Constraint`.""" + def __init__(self, **attributes): - """ + """Provide a simple Cube-attribute based :class:`Constraint`. + Example usage:: iris.AttributeConstraint(STASH='m01s16i004') @@ -454,11 +528,22 @@ def __init__(self, **attributes): """ self._attributes = attributes - Constraint.__init__(self, cube_func=self._cube_func) + super().__init__(cube_func=self._cube_func) + + def __eq__(self, other): + eq = ( + isinstance(other, AttributeConstraint) + and self._attributes == other._attributes + ) + return eq + + def __hash__(self): + # Must re-define if you overload __eq__ : Use object identity. + return id(self) def _cube_func(self, cube): match = True - for name, value in six.iteritems(self._attributes): + for name, value in self._attributes.items(): if name in cube.attributes: cube_attr = cube.attributes.get(name) # if we have a callable, then call it with the value, @@ -477,4 +562,119 @@ def _cube_func(self, cube): return match def __repr__(self): - return 'AttributeConstraint(%r)' % self._attributes + return "AttributeConstraint(%r)" % self._attributes + + +class NameConstraint(Constraint): + """Provide a simple Cube name based :class:`Constraint`.""" + + def __init__( + self, + standard_name="none", + long_name="none", + var_name="none", + STASH="none", + ): + """Provide a simple Cube name based :class:`Constraint`. + + Provide a simple Cube name based :class:`Constraint`, which matches + against each of the names provided, which may be either standard name, + long name, NetCDF variable name and/or the STASH from the attributes + dictionary. + + The name constraint will only succeed if *all* of the provided names + match. + + Parameters + ---------- + standard_name : optional + A string or callable representing the standard name to match + against. + long_name : optional + A string or callable representing the long name to match against. 
+ var_name : optional + A string or callable representing the NetCDF variable name to match + against. + STASH : optional + A string or callable representing the UM STASH code to match + against. + + Notes + ----- + The default value of each of the keyword arguments is the string + "none", rather than the singleton None, as None may be a legitimate + value to be matched against e.g., to constrain against all cubes + where the standard_name is not set, then use standard_name=None. + + Returns + ------- + bool + + Examples + -------- + Example usage:: + + iris.NameConstraint(long_name='air temp', var_name=None) + + iris.NameConstraint(long_name=lambda name: 'temp' in name) + + iris.NameConstraint(standard_name='air_temperature', + STASH=lambda stash: stash.item == 203) + """ + self.standard_name = standard_name + self.long_name = long_name + self.var_name = var_name + self.STASH = STASH + self._names = ("standard_name", "long_name", "var_name", "STASH") + super().__init__(cube_func=self._cube_func) + + def __eq__(self, other): + eq = isinstance(other, NameConstraint) and all( + getattr(self, attname) == getattr(other, attname) for attname in self._names + ) + return eq + + def __hash__(self): + # Must re-define if you overload __eq__ : Use object identity. + return id(self) + + def _cube_func(self, cube): + def matcher(target, value): + if callable(value): + result = False + if target is not None: + # + # Don't pass None through into the callable. Users should + # use the "name=None" pattern instead. Otherwise, users + # will need to explicitly handle the None case, which is + # unnecessary and pretty darn ugly e.g., + # + # lambda name: name is not None and name.startswith('ick') + # + result = value(target) + else: + result = value == target + return result + + match = True + for name in self._names: + expected = getattr(self, name) + if expected != "none": + if name == "STASH": + actual = cube.attributes.get(name) + else: + actual = getattr(cube, name) + match = matcher(actual, expected) + # Make this a short-circuit match. + if match is False: + break + + return match + + def __repr__(self): + names = [] + for name in self._names: + value = getattr(self, name) + if value != "none": + names.append("{}={!r}".format(name, value)) + return "{}({})".format(self.__class__.__name__, ", ".join(names)) diff --git a/lib/iris/_cube_coord_common.py b/lib/iris/_cube_coord_common.py deleted file mode 100644 index 6225b6f64c..0000000000 --- a/lib/iris/_cube_coord_common.py +++ /dev/null @@ -1,237 +0,0 @@ -# (C) British Crown Copyright 2010 - 2019, Met Office -# -# This file is part of Iris. -# -# Iris is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the -# Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Iris is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with Iris. If not, see .
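A hedged usage sketch of the NameConstraint semantics above, where the string "none" is the no-check sentinel and a real None matches unset names (the file name is hypothetical):

import iris
from iris import NameConstraint

# Match cubes whose long_name contains "temp" and whose var_name is unset.
constraint = NameConstraint(long_name=lambda name: "temp" in name, var_name=None)
cubes = iris.load("my_data.nc", constraint)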
- -from __future__ import (absolute_import, division, print_function) -from six.moves import (filter, input, map, range, zip) # noqa -import six - -import re -import string - -import cf_units - -import iris.std_names - - -# https://www.unidata.ucar.edu/software/netcdf/docs/netcdf_data_set_components.html#object_name -_TOKEN_PARSE = re.compile(r'''^[a-zA-Z0-9][\w\.\+\-@]*$''') - - -def get_valid_standard_name(name): - # Standard names are optionally followed by a standard name - # modifier, separated by one or more blank spaces - - if name is not None: - name_is_valid = False - # Supported standard name modifiers. Ref: [CF] Appendix C. - valid_std_name_modifiers = ['detection_minimum', - 'number_of_observations', - 'standard_error', - 'status_flag'] - - valid_name_pattern = re.compile(r'''^([a-zA-Z_]+)( *)([a-zA-Z_]*)$''') - name_groups = valid_name_pattern.match(name) - - if name_groups: - std_name, whitespace, std_name_modifier = name_groups.groups() - if (std_name in iris.std_names.STD_NAMES) and ( - bool(whitespace) == (std_name_modifier in - valid_std_name_modifiers)): - name_is_valid = True - - if name_is_valid is False: - raise ValueError('{!r} is not a valid standard_name'.format( - name)) - - return name - - -class LimitedAttributeDict(dict): - _forbidden_keys = ('standard_name', 'long_name', 'units', 'bounds', 'axis', - 'calendar', 'leap_month', 'leap_year', 'month_lengths', - 'coordinates', 'grid_mapping', 'climatology', - 'cell_methods', 'formula_terms', 'compress', - 'add_offset', 'scale_factor', - '_FillValue') - - def __init__(self, *args, **kwargs): - dict.__init__(self, *args, **kwargs) - # Check validity of keys - for key in six.iterkeys(self): - if key in self._forbidden_keys: - raise ValueError('%r is not a permitted attribute' % key) - - def __eq__(self, other): - # Extend equality to allow for NumPy arrays. - match = set(self.keys()) == set(other.keys()) - if match: - for key, value in six.iteritems(self): - match = value == other[key] - try: - match = bool(match) - except ValueError: - match = match.all() - if not match: - break - return match - - def __ne__(self, other): - return not self == other - - def __setitem__(self, key, value): - if key in self._forbidden_keys: - raise ValueError('%r is not a permitted attribute' % key) - dict.__setitem__(self, key, value) - - def update(self, other, **kwargs): - # Gather incoming keys - keys = [] - if hasattr(other, "keys"): - keys += list(other.keys()) - else: - keys += [k for k, v in other] - - keys += list(kwargs.keys()) - - # Check validity of keys - for key in keys: - if key in self._forbidden_keys: - raise ValueError('%r is not a permitted attribute' % key) - - dict.update(self, other, **kwargs) - - -class CFVariableMixin(object): - - _DEFAULT_NAME = 'unknown' # the name default string - - @staticmethod - def token(name): - ''' - Determine whether the provided name is a valid NetCDF name and thus - safe to represent a single parsable token. - - Args: - - * name: - The string name to verify - - Returns: - The provided name if valid, otherwise None. - - ''' - if name is not None: - result = _TOKEN_PARSE.match(name) - name = result if result is None else name - return name - - def name(self, default=None, token=False): - """ - Returns a human-readable name. - - First it tries :attr:`standard_name`, then 'long_name', then - 'var_name', then the STASH attribute before falling back to - the value of `default` (which itself defaults to 'unknown'). - - Kwargs: - - * default: - The value of the default name. 
- * token: - If true, ensure that the name returned satisfies the criteria for - the characters required by a valid NetCDF name. If it is not - possible to return a valid name, then a ValueError exception is - raised. - - Returns: - String. - - """ - def _check(item): - return self.token(item) if token else item - - default = self._DEFAULT_NAME if default is None else default - - result = (_check(self.standard_name) or _check(self.long_name) or - _check(self.var_name) or - _check(str(self.attributes.get('STASH', ''))) or - _check(default)) - - if token and result is None: - emsg = 'Cannot retrieve a valid name token from {!r}' - raise ValueError(emsg.format(self)) - - return result - - def rename(self, name): - """ - Changes the human-readable name. - - If 'name' is a valid standard name it will assign it to - :attr:`standard_name`, otherwise it will assign it to - :attr:`long_name`. - - """ - try: - self.standard_name = name - self.long_name = None - except ValueError: - self.standard_name = None - self.long_name = six.text_type(name) - - # Always clear var_name when renaming. - self.var_name = None - - @property - def standard_name(self): - """The standard name for the Cube's data.""" - return self._standard_name - - @standard_name.setter - def standard_name(self, name): - self._standard_name = get_valid_standard_name(name) - - @property - def units(self): - """The :mod:`~cf_units.Unit` instance of the object.""" - return self._units - - @units.setter - def units(self, unit): - self._units = cf_units.as_unit(unit) - - @property - def var_name(self): - """The netCDF variable name for the object.""" - return self._var_name - - @var_name.setter - def var_name(self, name): - if name is not None: - result = self.token(name) - if result is None or not name: - emsg = '{!r} is not a valid NetCDF variable name.' - raise ValueError(emsg.format(name)) - self._var_name = name - - @property - def attributes(self): - return self._attributes - - @attributes.setter - def attributes(self, attributes): - self._attributes = LimitedAttributeDict(attributes or {}) diff --git a/lib/iris/_data_manager.py b/lib/iris/_data_manager.py index 8f6e9e4e0c..dbd122ba04 100644 --- a/lib/iris/_data_manager.py +++ b/lib/iris/_data_manager.py @@ -1,49 +1,26 @@ -# (C) British Crown Copyright 2010 - 2017, Met Office +# Copyright Iris contributors # -# This file is part of Iris. -# -# Iris is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the -# Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Iris is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with Iris. If not, see . -""" -Management of common state and behaviour for cube and coordinate data. - -""" - -from __future__ import (absolute_import, division, print_function) -from six.moves import (filter, input, map, range, zip) # noqa -import six +# This file is part of Iris and is released under the BSD license. +# See LICENSE in the root of the repository for full licensing details. 
+"""Management of common state and behaviour for cube and coordinate data.""" import copy + import numpy as np import numpy.ma as ma from iris._lazy_data import as_concrete_data, as_lazy_data, is_lazy_data -from iris.util import array_equal - -class DataManager(object): - """ - Provides a well defined API for management of real or lazy data. - """ +class DataManager: + """Provides a well defined API for management of real or lazy data.""" def __init__(self, data): - """ - Create a data manager for the specified data. - - Args: + """Create a data manager for the specified data. - * data: + Parameters + ---------- + data : The :class:`~numpy.ndarray` or :class:`~numpy.ma.core.MaskedArray` real data, or :class:`~dask.array.core.Array` lazy data to be managed. @@ -60,31 +37,28 @@ def __init__(self, data): self._assert_axioms() def __copy__(self): - """ - Forbid :class:`~iris._data_manager.DataManager` instance - shallow-copy support. - - """ + """Forbid :class:`~iris._data_manager.DataManager` instance shallow-copy support.""" name = type(self).__name__ - emsg = ('Shallow-copy of {!r} is not permitted. Use ' - 'copy.deepcopy() or {}.copy() instead.') + emsg = ( + "Shallow-copy of {!r} is not permitted. Use " + "copy.deepcopy() or {}.copy() instead." + ) raise copy.Error(emsg.format(name, name)) def __deepcopy__(self, memo): - """ - Allow :class:`~iris._data_manager.DataManager` instance - deepcopy support. + """Allow :class:`~iris._data_manager.DataManager` instance deepcopy support. - Args: - - * memo: - :class:`copy` memo dictionary. + Parameters + ---------- + memo : :func:`copy` + :func:`copy` memo dictionary. """ return self._deepcopy(memo) def __eq__(self, other): - """ + """Perform :class:`~iris._data_manager.DataManager` instance equality. + Perform :class:`~iris._data_manager.DataManager` instance equality. Note that, this is explicitly not a lazy operation and will load any lazy payload to determine the equality result. @@ -93,16 +67,19 @@ def __eq__(self, other): the realised_dtype, the dtype of the payload, the fill-value and the payload content. - Args: - - * other: + Parameters + ---------- + other : :class:`~iris._data_manager.DataManager` The :class:`~iris._data_manager.DataManager` instance to compare with. - Returns: - Boolean. + Returns + ------- + bool """ + from iris.util import array_equal + result = NotImplemented if isinstance(other, type(self)): @@ -115,19 +92,21 @@ def __eq__(self, other): return result def __ne__(self, other): - """ + """Perform :class:`~iris._data_manager.DataManager` instance inequality. + Perform :class:`~iris._data_manager.DataManager` instance inequality. Note that, this is explicitly not a lazy operation and will load any lazy payload to determine the inequality result. - Args: - - * other: + Parameters + ---------- + other : :class:`~iris._data_manager.DataManager` The :class:`~iris._data_manager.DataManager` instance to compare with. - Returns: - Boolean. + Returns + ------- + bool """ result = self.__eq__(other) @@ -138,46 +117,35 @@ def __ne__(self, other): return result def __repr__(self): - """ - Returns an string representation of the instance. - - """ - fmt = '{cls}({data!r})' + """Return an string representation of the instance.""" + fmt = "{cls}({data!r})" result = fmt.format(data=self.core_data(), cls=type(self).__name__) return result def _assert_axioms(self): - """ - Definition of the manager state, that should never be violated. 
- - """ + """Definition of the manager state, that should never be violated.""" # Ensure there is a valid data state. is_lazy = self._lazy_array is not None is_real = self._real_array is not None - emsg = 'Unexpected data state, got {}lazy and {}real data.' + emsg = "Unexpected data state, got {}lazy and {}real data." state = is_lazy ^ is_real - assert state, emsg.format('' if is_lazy else 'no ', - '' if is_real else 'no ') + assert state, emsg.format("" if is_lazy else "no ", "" if is_real else "no ") def _deepcopy(self, memo, data=None): - """ - Perform a deepcopy of the :class:`~iris._data_manager.DataManager` - instance. - - Args: + """Perform a deepcopy of the :class:`~iris._data_manager.DataManager` instance. - * memo: - :class:`copy` memo dictionary. - - Kwargs: - - * data: + Parameters + ---------- + memo : :func:`copy` + :func:`copy` memo dictionary. + data : optional Replacement data to substitute the currently managed data with. - Returns: - :class:`~iris._data_manager.DataManager` instance. + Returns + ------- + :class:`~iris._data_manager.DataManager` instance. """ try: @@ -196,18 +164,18 @@ def _deepcopy(self, memo, data=None): # without copying it. result = DataManager(data) except ValueError as error: - emsg = 'Cannot copy {!r} - {}' + emsg = "Cannot copy {!r} - {}" raise ValueError(emsg.format(type(self).__name__, error)) return result @property def data(self): - """ - Returns the real data. Any lazy data being managed will be realised. + """Return the real data. Any lazy data being managed will be realised. - Returns: - :class:`~numpy.ndarray` or :class:`numpy.ma.core.MaskedArray`. + Returns + ------- + :class:`~numpy.ndarray` or :class:`numpy.ma.core.MaskedArray`. """ if self.has_lazy_data(): @@ -219,11 +187,13 @@ def data(self): # Reset the lazy data and the realised dtype. self._lazy_array = None except MemoryError: - emsg = ('Failed to realise the lazy data as there was not ' - 'enough memory available.\n' - 'The data shape would have been {!r} with {!r}.\n ' - 'Consider freeing up variables or indexing the data ' - 'before trying again.') + emsg = ( + "Failed to realise the lazy data as there was not " + "enough memory available.\n" + "The data shape would have been {!r} with {!r}.\n " + "Consider freeing up variables or indexing the data " + "before trying again." + ) raise MemoryError(emsg.format(self.shape, self.dtype)) # Check the manager contract, as the managed data has changed. @@ -233,36 +203,36 @@ def data(self): @data.setter def data(self, data): - """ - Replaces the currently managed data with the specified data, which must + """Replace the currently managed data with the specified data. + + Replace the currently managed data with the specified data, which must be of an equivalent shape. Note that, the only shape promotion permitted is for 0-dimensional scalar data to be replaced with a single item 1-dimensional data. - Args: - - * data: + Parameters + ---------- + data : The :class:`~numpy.ndarray` or :class:`~numpy.ma.core.MaskedArray` real data, or :class:`~dask.array.core.Array` lazy data to be managed. """ # Ensure we have numpy-like data. - if not (hasattr(data, 'shape') and hasattr(data, 'dtype')): + if not (hasattr(data, "shape") and hasattr(data, "dtype")): data = np.asanyarray(data) # Determine whether the class instance has been created, # as this method is called from within the __init__. 
- init_done = (self._lazy_array is not None or - self._real_array is not None) + init_done = self._lazy_array is not None or self._real_array is not None if init_done and self.shape != data.shape: # The _ONLY_ data reshape permitted is converting a 0-dimensional # array i.e. self.shape == () into a 1-dimensional array of length # one i.e. data.shape == (1,) if self.shape or data.shape != (1,): - emsg = 'Require data with shape {!r}, got {!r}.' + emsg = "Require data with shape {!r}, got {!r}." raise ValueError(emsg.format(self.shape, data.shape)) # Set lazy or real data, and reset the other. @@ -285,53 +255,45 @@ def data(self, data): @property def dtype(self): - """ - The dtype of the realised lazy data or the dtype of the real data. - - """ + """The dtype of the realised lazy data or the dtype of the real data.""" return self.core_data().dtype @property def ndim(self): - """ - The number of dimensions covered by the data being managed. - - """ + """The number of dimensions covered by the data being managed.""" return self.core_data().ndim @property def shape(self): - """ - The shape of the data being managed. - - """ + """The shape of the data being managed.""" return self.core_data().shape def copy(self, data=None): - """ - Returns a deep copy of this :class:`~iris._data_manager.DataManager` - instance. - - Kwargs: + """Return a deep copy of this :class:`~iris._data_manager.DataManager` instance. - * data: + Parameters + ---------- + data : optional Replace the data of the copy with this data. - Returns: - A copy :class:`~iris._data_manager.DataManager` instance. + Returns + ------- + A copy :class:`~iris._data_manager.DataManager` instance. """ memo = {} return self._deepcopy(memo, data=data) def core_data(self): - """ + """Provide real data or lazy data. + If real data is being managed, then return the :class:`~numpy.ndarray` or :class:`numpy.ma.core.MaskedArray`. Otherwise, return the lazy :class:`~dask.array.core.Array`. - Returns: - The real or lazy data. + Returns + ------- + The real or lazy data. """ if self.has_lazy_data(): @@ -342,24 +304,24 @@ def core_data(self): return result def has_lazy_data(self): - """ - Determine whether lazy data is being managed. + """Determine whether lazy data is being managed. - Returns: - Boolean. + Returns + ------- + bool """ return self._lazy_array is not None def lazy_data(self): - """ - Return the lazy representation of the managed data. + """Return the lazy representation of the managed data. If only real data is being managed, then return a lazy representation of that real data. - Returns: - :class:`~dask.array.core.Array` + Returns + ------- + :class:`~dask.array.core.Array` .. note:: This method will never realise any lazy data. diff --git a/lib/iris/_deprecation.py b/lib/iris/_deprecation.py index e818bfede1..b771883a71 100644 --- a/lib/iris/_deprecation.py +++ b/lib/iris/_deprecation.py @@ -1,40 +1,25 @@ -# (C) British Crown Copyright 2010 - 2016, Met Office +# Copyright Iris contributors # -# This file is part of Iris. -# -# Iris is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the -# Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Iris is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. 
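A minimal sketch of the DataManager contract described above, using the internal iris._data_manager module: exactly one of the lazy/real slots is populated at any time, and the only permitted shape promotion is 0-dimensional to (1,):

import numpy as np
from iris._data_manager import DataManager

dm = DataManager(np.arange(4))
assert not dm.has_lazy_data()
lazy = dm.lazy_data()  # a lazy view of the real data; nothing is realised
assert dm.shape == (4,)

dm_scalar = DataManager(np.array(5))
dm_scalar.data = np.array([5])  # permitted: () promoted to (1,)
assert dm_scalar.shape == (1,)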
-# -# You should have received a copy of the GNU Lesser General Public License -# along with Iris. If not, see . -""" -Utilities for producing runtime deprecation messages. - -""" -from __future__ import (absolute_import, division, print_function) -from six.moves import (filter, input, map, range, zip) # noqa -import six +# This file is part of Iris and is released under the BSD license. +# See LICENSE in the root of the repository for full licensing details. +"""Utilities for producing runtime deprecation messages.""" import warnings -from iris.exceptions import IrisError - class IrisDeprecation(UserWarning): - """An Iris deprecation warning.""" + """An Iris deprecation warning. + + Note this subclasses UserWarning for backwards compatibility with Iris' + original deprecation warnings. Should subclass DeprecationWarning at the + next major release. + """ + pass def warn_deprecated(msg, stacklevel=2): - """ - Issue an Iris deprecation warning. + """Issue an Iris deprecation warning. Calls :func:`warnings.warn', to emit the message 'msg' as a :class:`warnings.warning`, of the subclass :class:`IrisDeprecationWarning`. @@ -59,7 +44,7 @@ def warn_deprecated(msg, stacklevel=2): >>> """ - warnings.warn(msg, IrisDeprecation, stacklevel=stacklevel) + warnings.warn(msg, category=IrisDeprecation, stacklevel=stacklevel) # A Mixin for a wrapper class that copies the docstring of the wrapped class @@ -74,8 +59,7 @@ def __new__(metacls, classname, bases, class_dict): parent_class = bases[0] # Copy the original class docstring. - class_dict['__doc__'] = parent_class.__doc__ + class_dict["__doc__"] = parent_class.__doc__ # Return the result. - return super(ClassWrapperSameDocstring, metacls).__new__( - metacls, classname, bases, class_dict) + return super().__new__(metacls, classname, bases, class_dict) diff --git a/lib/iris/_lazy_data.py b/lib/iris/_lazy_data.py index 2de93824d8..cd093b315c 100644 --- a/lib/iris/_lazy_data.py +++ b/lib/iris/_lazy_data.py @@ -1,216 +1,303 @@ -# (C) British Crown Copyright 2017 - 2019, Met Office +# Copyright Iris contributors # -# This file is part of Iris. -# -# Iris is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the -# Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Iris is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with Iris. If not, see . -""" -Routines for lazy data handling. +# This file is part of Iris and is released under the BSD license. +# See LICENSE in the root of the repository for full licensing details. +"""Routines for lazy data handling. To avoid replicating implementation-dependent test and conversion code. """ -from __future__ import (absolute_import, division, print_function) -from six.moves import (filter, input, map, range, zip) # noqa -from functools import wraps +from functools import lru_cache, wraps +from types import ModuleType +from typing import Sequence import dask import dask.array as da import dask.config import dask.utils - import numpy as np import numpy.ma as ma def non_lazy(func): - """ - Turn a lazy function into a function that returns a result immediately. 
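A small usage sketch of ``warn_deprecated``; with the default ``stacklevel=2``, the emitted warning is attributed to the code that calls ``warn_deprecated``::

    from iris._deprecation import warn_deprecated

    def old_function():
        warn_deprecated(
            "iris.old_function() is deprecated; use its replacement instead."
        )
        # ... original behaviour continues here ...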
+ """Turn a lazy function into a function that returns a result immediately.""" - """ @wraps(func) def inner(*args, **kwargs): """Immediately return the results of a lazy function.""" result = func(*args, **kwargs) return dask.compute(result)[0] + return inner def is_lazy_data(data): - """ - Return whether the argument is an Iris 'lazy' data array. + """Return whether the argument is an Iris 'lazy' data array. - At present, this means simply a Dask array. + At present, this means simply a :class:`dask.array.Array`. We determine this by checking for a "compute" property. """ - result = hasattr(data, 'compute') + result = hasattr(data, "compute") return result -def _optimum_chunksize(chunks, shape, - limit=None, - dtype=np.dtype('f4')): +def is_masked_data(data: np.ndarray | da.Array) -> bool: + """Return whether the argument is a masked array.""" + return isinstance(da.utils.meta_from_array(data), np.ma.MaskedArray) + + +def is_lazy_masked_data(data): + """Determine whether managed data is lazy and masked. + + Return True if the argument is both an Iris 'lazy' data array and the + underlying array is of masked type. Otherwise return False. + """ + return is_lazy_data(data) and is_masked_data(data) + + +@lru_cache +def _optimum_chunksize_internals( + chunks, + shape, + limit=None, + dtype=np.dtype("f4"), + dims_fixed=None, + dask_array_chunksize=dask.config.get("array.chunk-size"), +): + """Reduce or increase an initial chunk shap. + Reduce or increase an initial chunk shape to get close to a chosen ideal size, while prioritising the splitting of the earlier (outer) dimensions and keeping intact the later (inner) ones. - Args: - - * chunks (tuple of int, or None): - Pre-existing chunk shape of the target data : None if unknown. - * shape (tuple of int): + Parameters + ---------- + chunks : tuple of int + Pre-existing chunk shape of the target data. + shape : tuple of int The full array shape of the target data. - * limit (int): - The 'ideal' target chunk size, in bytes. Default from dask.config. - * dtype (np.dtype): + limit : int, optional + The 'ideal' target chunk size, in bytes. Default from + :mod:`dask.config`. + dtype : np.dtype Numpy dtype of target data. - - Returns: - * chunk (tuple of int): + dims_fixed : list of bool, optional + If set, a list of values equal in length to 'chunks' or 'shape'. + 'True' values indicate a dimension that can not be changed, i.e. that + element of the result must equal the corresponding value in 'chunks' or + data.shape. + + Returns + ------- + tuple of int The proposed shape of one full chunk. - .. note:: - The purpose of this is very similar to - `dask.array.core.normalize_chunks`, when called as - `(chunks='auto', shape, dtype=dtype, previous_chunks=chunks, ...)`. - Except, the operation here is optimised specifically for a 'c-like' - dimension order, i.e. outer dimensions first, as for netcdf variables. - So if, in future, this policy can be implemented in dask, then we would - prefer to replace this function with a call to that one. - Accordingly, the arguments roughly match 'normalize_chunks', except - that we don't support the alternative argument forms of that routine. - The return value, however, is a single 'full chunk', rather than a - complete chunking scheme : so an equivalent code usage could be - "chunks = [c[0] for c in normalise_chunks('auto', ...)]". + Notes + ----- + The purpose of this is very similar to + :func:`dask.array.core.normalize_chunks`, when called as + `(chunks='auto', shape, dtype=dtype, previous_chunks=chunks, ...)`. 
+ Except, the operation here is optimised specifically for a 'c-like' + dimension order, i.e. outer dimensions first, as for netcdf variables. + So if, in future, this policy can be implemented in dask, then we would + prefer to replace this function with a call to that one. + Accordingly, the arguments roughly match 'normalize_chunks', except + that we don't support the alternative argument forms of that routine. + The return value, however, is a single 'full chunk', rather than a + complete chunking scheme : so an equivalent code usage could be + "chunks = [c[0] for c in normalise_chunks('auto', ...)]". """ # Set the chunksize limit. if limit is None: # Fetch the default 'optimal' chunksize from the dask config. - limit = dask.config.get('array.chunk-size') + limit = dask_array_chunksize # Convert to bytes limit = dask.utils.parse_bytes(limit) point_size_limit = limit / dtype.itemsize - # Create result chunks, starting with a copy of the input. - result = list(chunks) - - if np.prod(result) < point_size_limit: - # If size is less than maximum, expand the chunks, multiplying later - # (i.e. inner) dims first. - i_expand = len(shape) - 1 - while np.prod(result) < point_size_limit and i_expand >= 0: - factor = np.floor(point_size_limit * 1.0 / np.prod(result)) - new_dim = result[i_expand] * int(factor) - if new_dim >= shape[i_expand]: - # Clip to dim size : chunk dims must not exceed the full shape. - new_dim = shape[i_expand] - else: - # 'new_dim' is less than the relevant dim of 'shape' -- but it - # is also the largest possible multiple of the input-chunks, - # within the size limit. - # So : 'i_expand' is the outer (last) dimension over which we - # will multiply the input chunks, and 'new_dim' is a value that - # ensures the fewest possible chunks within that dim. - - # Now replace 'new_dim' with the value **closest to equal-size - # chunks**, for the same (minimum) number of chunks. - # More-equal chunks are practically better. - # E.G. : "divide 8 into multiples of 2, with a limit of 7", - # produces new_dim=6, which would mean chunks of sizes (6, 2). - # But (4, 4) is clearly better for memory and time cost. - - # Calculate how many (expanded) chunks fit into this dimension. - dim_chunks = np.ceil(shape[i_expand] * 1. / new_dim) - # Get "ideal" (equal) size for that many chunks. - ideal_equal_chunk_size = shape[i_expand] / dim_chunks - # Use the nearest whole multiple of input chunks >= ideal. - new_dim = int(result[i_expand] * - np.ceil(ideal_equal_chunk_size / - result[i_expand])) - - result[i_expand] = new_dim - i_expand -= 1 - else: - # Similarly, reduce if too big, reducing earlier (outer) dims first. - i_reduce = 0 - while np.prod(result) > point_size_limit: - factor = np.ceil(np.prod(result) / point_size_limit) - new_dim = int(result[i_reduce] / factor) - if new_dim < 1: - new_dim = 1 - result[i_reduce] = new_dim - i_reduce += 1 - - return tuple(result) + if dims_fixed is not None: + if not np.any(dims_fixed): + dims_fixed = None + if dims_fixed is None: + # Get initial result chunks, starting with a copy of the input. + working = list(chunks) + else: + # Adjust the operation to ignore the 'fixed' dims. + # (We reconstruct the original later, before return). + chunks = np.array(chunks) + dims_fixed_arr = np.array(dims_fixed) + # Reduce the target size by the fixed size of all the 'fixed' dims. + point_size_limit = point_size_limit // np.prod(chunks[dims_fixed_arr]) + # Work on only the 'free' dims. 
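The "divide 8 into multiples of 2, with a limit of 7" example above can be worked through numerically; a sketch of the equal-size adjustment::

    import numpy as np

    shape, chunk, limit = 8, 2, 7
    naive = chunk * int(np.floor(limit / chunk))     # 6, i.e. chunks of (6, 2)
    n_chunks = np.ceil(shape / naive)                # 2 chunks are needed
    ideal = shape / n_chunks                         # 4.0 points per chunk
    new_dim = chunk * int(np.ceil(ideal / chunk))    # 4, i.e. chunks of (4, 4)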
+ original_shape = tuple(shape) + shape = tuple(np.array(shape)[~dims_fixed_arr]) + working = list(chunks[~dims_fixed_arr]) + + if len(working) >= 1: + if np.prod(working) < point_size_limit: + # If size is less than maximum, expand the chunks, multiplying + # later (i.e. inner) dims first. + i_expand = len(shape) - 1 + while np.prod(working) < point_size_limit and i_expand >= 0: + factor = np.floor(point_size_limit * 1.0 / np.prod(working)) + new_dim = working[i_expand] * int(factor) + if new_dim >= shape[i_expand]: + # Clip to dim size : must not exceed the full shape. + new_dim = shape[i_expand] + else: + # 'new_dim' is less than the relevant dim of 'shape' -- but + # it is also the largest possible multiple of the + # input-chunks, within the size limit. + # So : 'i_expand' is the outer (last) dimension over which + # we will multiply the input chunks, and 'new_dim' is a + # value giving the fewest possible chunks within that dim. + + # Now replace 'new_dim' with the value **closest to + # equal-size chunks**, for the same (minimum) number of + # chunks. More-equal chunks are practically better. + # E.G. : "divide 8 into multiples of 2, with a limit of 7", + # produces new_dim=6, meaning chunks of sizes (6, 2). + # But (4, 4) is clearly better for memory and time cost. + + # Calculate how many (expanded) chunks fit in this dim. + dim_chunks = np.ceil(shape[i_expand] * 1.0 / new_dim) + # Get "ideal" (equal) size for that many chunks. + ideal_equal_chunk_size = shape[i_expand] / dim_chunks + # Use the nearest whole multiple of input chunks >= ideal. + new_dim = int( + working[i_expand] + * np.ceil(ideal_equal_chunk_size / working[i_expand]) + ) + + working[i_expand] = new_dim + i_expand -= 1 + else: + # Similarly, reduce if too big, reducing earlier (outer) dims first. + i_reduce = 0 + while np.prod(working) > point_size_limit: + factor = np.ceil(np.prod(working) / point_size_limit) + new_dim = int(working[i_reduce] / factor) + if new_dim < 1: + new_dim = 1 + working[i_reduce] = new_dim + i_reduce += 1 + + working = tuple(working) + + if dims_fixed is None: + result = working + else: + # Reconstruct the original form + result = [] + for i_dim in range(len(original_shape)): + if dims_fixed[i_dim]: + dim = chunks[i_dim] + else: + dim = working[0] + working = working[1:] + result.append(dim) -def as_lazy_data(data, chunks=None, asarray=False): - """ - Convert the input array `data` to a dask array. + return result - Args: - * data (array-like): +@wraps(_optimum_chunksize_internals) +def _optimum_chunksize( + chunks, + shape, + limit=None, + dtype=np.dtype("f4"), + dims_fixed=None, +): + # By providing dask_array_chunksize as an argument, we make it so that the + # output of _optimum_chunksize_internals depends only on its arguments (and + # thus we can use lru_cache) + return _optimum_chunksize_internals( + tuple(chunks), + tuple(shape), + limit=limit, + dtype=dtype, + dims_fixed=dims_fixed, + dask_array_chunksize=dask.config.get("array.chunk-size"), + ) + + +def as_lazy_data(data, chunks=None, asarray=False, meta=None, dims_fixed=None): + """Convert the input array `data` to a :class:`dask.array.Array`. + + Parameters + ---------- + data : array-like An indexable object with 'shape', 'dtype' and 'ndim' properties. - This will be converted to a dask array. - - Kwargs: - - * chunks (list of int): + This will be converted to a :class:`dask.array.Array`. + chunks : list of int, optional If present, a source chunk shape, e.g. for a chunked netcdf variable. 
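Passing ``dask_array_chunksize`` in as an argument keeps the cached function pure, so ``lru_cache`` cannot serve stale results after the dask config changes. A minimal sketch of this memoisation pattern (hypothetical names)::

    from functools import lru_cache

    import dask.config

    @lru_cache
    def _cached_work(chunks, shape, chunk_size):
        # Depends only on its (hashable) arguments, so caching is safe.
        return ...

    def public_work(chunks, shape):
        # Thread the mutable global config through as an explicit argument.
        return _cached_work(
            tuple(chunks), tuple(shape), dask.config.get("array.chunk-size")
        )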
- - * asarray (bool): + If set to "auto", Iris chunking optimisation will be bypassed, and dask's + default chunking will be used instead. + asarray : bool, default=False If True, then chunks will be converted to instances of `ndarray`. Set to False (default) to pass passed chunks through unchanged. - - Returns: - The input array converted to a dask array. - - .. note:: - The result chunk size is a multiple of 'chunks', if given, up to the - dask default chunksize, i.e. `dask.config.get('array.chunk-size'), - or the full data shape if that is smaller. - If 'chunks' is not given, the result has chunks of the full data shape, - but reduced by a factor if that exceeds the dask default chunksize. + meta : numpy.ndarray, optional + Empty ndarray created with same NumPy backend, ndim and dtype as the + Dask Array being created. + dims_fixed : list of bool, optional + If set, a list of values equal in length to 'chunks' or data.ndim. + 'True' values indicate a dimension which can not be changed, i.e. the + result for that index must equal the value in 'chunks' or data.shape. + + Returns + ------- + :class:`dask.array.Array` + The input array converted to a :class:`dask.array.Array`. + + Notes + ----- + The result chunk size is a multiple of 'chunks', if given, up to the + dask default chunksize, i.e. `dask.config.get('array.chunk-size')`, + or the full data shape if that is smaller. + If 'chunks' is not given, the result has chunks of the full data shape, + but reduced by a factor if that exceeds the dask default chunksize. """ - if chunks is None: - # No existing chunks : Make a chunk the shape of the entire input array - # (but we will subdivide it if too big). - chunks = list(data.shape) - - # Adjust chunk size for better dask performance, - # NOTE: but only if no shape dimension is zero, so that we can handle the - # PPDataProxy of "raw" landsea-masked fields, which have a shape of (0, 0). - if all(elem > 0 for elem in data.shape): - # Expand or reduce the basic chunk shape to an optimum size. - chunks = _optimum_chunksize(chunks, shape=data.shape, dtype=data.dtype) - if isinstance(data, ma.core.MaskedConstant): data = ma.masked_array(data.data, mask=data.mask) + + if meta is None and not isinstance(data, (np.ndarray, da.Array)): + raise ValueError( + "For performance reasons, `meta` cannot be `None` if `data` is " + "anything other than a Numpy or Dask array." + ) + + if chunks != "auto": + if chunks is None: + # No existing chunks : Make a chunk the shape of the entire input array + # (but we will subdivide it if too big). + chunks = list(data.shape) + + # Adjust chunk size for better dask performance, + # NOTE: but only if no shape dimension is zero, so that we can handle the + # PPDataProxy of "raw" landsea-masked fields, which have a shape of (0, 0). + if all(elem > 0 for elem in data.shape): + # Expand or reduce the basic chunk shape to an optimum size. + chunks = _optimum_chunksize( + chunks, + shape=data.shape, + dtype=data.dtype, + dims_fixed=dims_fixed, + ) + if not is_lazy_data(data): - data = da.from_array(data, chunks=chunks, asarray=asarray) + data = da.from_array(data, chunks=chunks, asarray=asarray, meta=meta) return data def _co_realise_lazy_arrays(arrays): - """ - Compute multiple lazy arrays and return a list of real values. + """Compute multiple lazy arrays and return a list of real values. All the arrays are computed together, so they can share results for common graph elements. 
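A round-trip sketch of the two conversions (``as_concrete_data`` follows in the next hunk)::

    import numpy as np
    from iris._lazy_data import as_concrete_data, as_lazy_data

    real = np.arange(12.0).reshape(3, 4)
    lazy = as_lazy_data(real)                      # chunk shape optimised by Iris
    lazy_auto = as_lazy_data(real, chunks="auto")  # defer to dask's own chunking
    back = as_concrete_data(lazy)                  # realise the lazy array
    print(np.array_equal(real, back))              # True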
@@ -235,77 +322,169 @@ def _co_realise_lazy_arrays(arrays): # Convert any masked constants into NumPy masked arrays. # NOTE: in this case, also apply the original lazy-array dtype, as # masked constants *always* have dtype float64. - real_out = ma.masked_array(real_out.data, mask=real_out.mask, - dtype=lazy_in.dtype) + real_out = ma.masked_array( + real_out.data, mask=real_out.mask, dtype=lazy_in.dtype + ) results.append(real_out) return results def as_concrete_data(data): - """ + """Return the actual content of a lazy array, as a numpy array. + Return the actual content of a lazy array, as a numpy array. If the input data is a NumPy `ndarray` or masked array, return it unchanged. If the input data is lazy, return the realised result. - Args: - - * data: - A dask array, NumPy `ndarray` or masked array + Parameters + ---------- + data : + A dask array, NumPy `ndarray` or masked array. - Returns: - A NumPy `ndarray` or masked array. + Returns + ------- + NumPy `ndarray` or masked array. """ if is_lazy_data(data): - data, = _co_realise_lazy_arrays([data]) + (data,) = _co_realise_lazy_arrays([data]) return data -def multidim_lazy_stack(stack): +def _combine( + arrays: Sequence[da.Array | np.ndarray], + operation: str, + **kwargs, +) -> da.Array | np.ndarray: + """Combine multiple arrays into a single array. + + Provides enhanced versions of :func:`~dask.array.concatenate` or :func:`~dask.array.stack`, + which ensure that all computed results are masked-array, if the combined .meta is masked. + + Parameters + ---------- + arrays : + The arrays to combine. + operation : + The combination operation to apply. + **kwargs : + Any keyword arguments to pass to the combination operation. + + """ + lazy = any(is_lazy_data(a) for a in arrays) + masked = any(is_masked_data(a) for a in arrays) + + array_module: ModuleType = np + if masked: + if lazy: + # Avoid inconsistent array type when slicing resulting array + arrays = tuple( + a if is_lazy_masked_data(a) else da.ma.masked_array(a) for a in arrays + ) + else: + # Avoid dropping the masks + array_module = np.ma + + func = getattr(array_module, operation) + return func(arrays, **kwargs) + + +def concatenate( + arrays: Sequence[da.Array | np.ndarray], + axis: int = 0, +) -> da.Array | np.ndarray: + """Concatenate a sequence of arrays along a new axis. + + Improves on the regular :func:`dask.array.concatenate` by always respecting a masked + ``.meta``, as described for :func:`_combine`. + + Parameters + ---------- + arrays : + The arrays must have the same shape, except in the dimension + corresponding to `axis` (the first, by default). + axis : + Dimension along which to align all of the arrays. If axis is None, + arrays are flattened before use. + + Returns + ------- + The concatenated array. + """ - Recursively build a multidimensional stacked dask array. + return _combine(arrays, operation="concatenate", axis=axis) + - This is needed because dask.array.stack only accepts a 1-dimensional list. +def stack( + arrays: Sequence[da.Array | np.ndarray], + axis: int = 0, +) -> da.Array | np.ndarray: + """Stack a sequence of arrays along a new axis. - Args: + Improves on the regular :func:`dask.array.stack` by always respecting a masked + ``.meta``, as described for :func:`_combine`. - * stack: - An ndarray of dask arrays. + Parameters + ---------- + arrays : + The arrays must have the same shape. + axis : + Dimension along which to align all of the arrays. - Returns: - The input array converted to a lazy dask array. + Returns + ------- + The stacked array. 
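A sketch of the mask-preserving behaviour that distinguishes these wrappers from the plain dask functions::

    import dask.array as da
    import numpy as np
    from iris._lazy_data import stack

    a = da.ma.masked_array(da.arange(3), mask=[0, 1, 0])
    b = da.arange(3)    # plain, unmasked lazy array

    stacked = stack([a, b])    # 'b' is promoted to masked form as well
    print(isinstance(stacked.compute(), np.ma.MaskedArray))    # True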
""" - if stack.ndim == 0: + return _combine(arrays, operation="stack", axis=axis) + + +def multidim_lazy_stack(arr): + """Recursively build a multidimensional stacked dask array. + + This is needed because :meth:`dask.array.Array.stack` only accepts a + 1-dimensional list. + + Parameters + ---------- + arr : + An ndarray of :class:`dask.array.Array`. + + Returns + ------- + The input array converted to a lazy :class:`dask.array.Array`. + + """ + if arr.ndim == 0: # A 0-d array cannot be stacked. - result = stack.item() - elif stack.ndim == 1: + result = arr.item() + elif arr.ndim == 1: # Another base case : simple 1-d goes direct in dask. - result = da.stack(list(stack)) + result = stack(list(arr)) else: # Recurse because dask.stack does not do multi-dimensional. - result = da.stack([multidim_lazy_stack(subarray) - for subarray in stack]) + result = stack([multidim_lazy_stack(subarray) for subarray in arr]) return result def co_realise_cubes(*cubes): - """ - Fetch 'real' data for multiple cubes, in a shared calculation. + """Fetch 'real' data for multiple cubes, in a shared calculation. This computes any lazy data, equivalent to accessing each `cube.data`. However, lazy calculations and data fetches can be shared between the computations, improving performance. - Args: - - * cubes (list of :class:`~iris.cube.Cube`): + Parameters + ---------- + cubes : list of :class:`~iris.cube.Cube` Arguments, each of which is a cube to be realised. - For example:: + Examples + -------- + :: # Form stats. a_std = cube_a.collapsed(['x', 'y'], iris.analysis.STD_DEV) @@ -318,9 +497,9 @@ def co_realise_cubes(*cubes): co_realise_cubes(a_std, b_std, ab_mean_diff, std_err) - .. Note:: + .. note:: - Cubes with non-lazy data may also be passed, with no ill effect. + Cubes with non-lazy data may also be passed, with no ill effect. """ results = _co_realise_lazy_arrays([cube.core_data() for cube in cubes]) @@ -329,19 +508,20 @@ def co_realise_cubes(*cubes): def lazy_elementwise(lazy_array, elementwise_op): - """ - Apply a (numpy-style) elementwise array operation to a lazy array. + """Apply a (numpy-style) elementwise array operation to a lazy array. Elementwise means that it performs a independent calculation at each point of the input, producing a result array of the same shape. - Args: - - * lazy_array: + Parameters + ---------- + lazy_array : The lazy array object to operate on. - * elementwise_op: + elementwise_op : The elementwise operation, a function operating on numpy arrays. + Notes + ----- .. note: A single-point "dummy" call is made to the operation function, to @@ -359,3 +539,65 @@ def lazy_elementwise(lazy_array, elementwise_op): dtype = elementwise_op(np.zeros(1, lazy_array.dtype)).dtype return da.map_blocks(elementwise_op, lazy_array, dtype=dtype) + + +def map_complete_blocks(src, func, dims, out_sizes, *args, **kwargs): + """Apply a function to complete blocks. + + Complete means that the data is not chunked along the chosen dimensions. + Uses :func:`dask.array.map_blocks` to implement the mapping. + + Parameters + ---------- + src : :class:`~iris.cube.Cube` or array-like + Source cube that function is applied to. + func : + Function to apply. + dims : tuple of int + Dimensions that cannot be chunked. + out_sizes : tuple of int + Output size of dimensions that cannot be chunked. + *args : tuple + Additional arguments to pass to `func`. + **kwargs : dict + Additional keyword arguments to pass to `func`. 
+ + Returns + ------- + Array-like + + See Also + -------- + :func:`dask.array.map_blocks` : The function used for the mapping. + + """ + data = None + result = None + + if is_lazy_data(src): + data = src + elif not hasattr(src, "has_lazy_data"): + # Not a lazy array and not a cube. So treat as ordinary numpy array. + result = func(src, *args, **kwargs) + elif not src.has_lazy_data(): + result = func(src.data, *args, **kwargs) + else: + data = src.lazy_data() + + if result is None and data is not None: + # Ensure dims are not chunked + in_chunks = list(data.chunks) + for dim in dims: + in_chunks[dim] = src.shape[dim] + data = data.rechunk(in_chunks) + + # Determine output chunks + out_chunks = list(data.chunks) + for dim, size in zip(dims, out_sizes): + out_chunks[dim] = size + + result = data.map_blocks( + func, *args, chunks=out_chunks, dtype=src.dtype, **kwargs + ) + + return result diff --git a/lib/iris/_merge.py b/lib/iris/_merge.py index a1eafdbc3a..2d8beb6f27 100644 --- a/lib/iris/_merge.py +++ b/lib/iris/_merge.py @@ -1,41 +1,31 @@ -# (C) British Crown Copyright 2010 - 2017, Met Office +# Copyright Iris contributors # -# This file is part of Iris. -# -# Iris is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the -# Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Iris is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with Iris. If not, see . -""" -Automatic collation of cubes into higher-dimensional cubes. +# This file is part of Iris and is released under the BSD license. +# See LICENSE in the root of the repository for full licensing details. +"""Automatic collation of cubes into higher-dimensional cubes. Typically the cube merge process is handled by -:method:`iris.cube.CubeList.merge`. +:meth:`iris.cube.CubeList.merge`. """ -from __future__ import (absolute_import, division, print_function) -from six.moves import (filter, input, map, range, zip) # noqa -import six - -from collections import namedtuple, OrderedDict +from collections import OrderedDict, namedtuple from copy import deepcopy import numpy as np -import numpy.ma as ma -from iris._lazy_data import (as_lazy_data, as_concrete_data, is_lazy_data, - multidim_lazy_stack) -import iris.cube +from iris._lazy_data import ( + as_concrete_data, + as_lazy_data, + is_lazy_data, + multidim_lazy_stack, +) +from iris.common import CoordMetadata, CubeMetadata +from iris.common._split_attribute_dicts import ( + _convert_splitattrs_to_pairedkeys_dict as convert_splitattrs_to_pairedkeys_dict, +) import iris.coords +import iris.cube import iris.exceptions import iris.util @@ -43,26 +33,21 @@ # # Private namedtuple wrapper classes. # -class _Template(namedtuple('Template', - ['dims', 'points', 'bounds', 'kwargs'])): - """ - Common framework from which to build a dimension or auxiliary coordinate. - - Args: +class _Template(namedtuple("Template", ["dims", "points", "bounds", "kwargs"])): + """Common framework from which to build a dimension or auxiliary coordinate. - * dims: + Parameters + ---------- + dims : Tuple of the associated :class:`iris.cube.Cube` data dimension/s spanned by this coordinate template. 
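A runnable sketch of ``map_complete_blocks`` on a plain lazy array, where dimension 1 must stay unchunked and doubles in size under the mapped function::

    import dask.array as da
    import numpy as np
    from iris._lazy_data import map_complete_blocks

    data = da.zeros((10, 4), chunks=(2, 2))

    def double_columns(block):
        return np.repeat(block, 2, axis=1)

    # Dimension 1 is rechunked to its full size before mapping.
    result = map_complete_blocks(data, double_columns, dims=(1,), out_sizes=(8,))
    print(result.chunks)    # ((2, 2, 2, 2, 2), (8,))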
- - * points: + points : A :mod:`numpy` array representing the coordinate point data. No points data is represented by None. - - * bounds: + bounds : A :mod:`numpy` array representing the coordinate bounds data. No bounds data is represented by None. - - * kwargs: + kwargs : A dictionary of key/value pairs required to create a coordinate. """ @@ -70,22 +55,20 @@ class _Template(namedtuple('Template', __slots__ = () -class _CoordMetaData(namedtuple('CoordMetaData', - ['points_dtype', 'bounds_dtype', 'kwargs'])): - """ - Bespoke metadata required to build a dimension or auxiliary coordinate. - - Args: +class _CoordMetaData( + namedtuple("CoordMetaData", ["points_dtype", "bounds_dtype", "kwargs"]) +): + """Bespoke metadata required to build a dimension or auxiliary coordinate. - * points_dtype: + Parameters + ---------- + points_dtype : The points data :class:`numpy.dtype` of an associated coordinate. None otherwise. - - * bounds_dtype: + bounds_dtype : The bounds data :class:`numpy.dtype` of an associated coordinate. None otherwise. - - * kwargs: + kwargs : A dictionary of key/value pairs required to create a coordinate. """ @@ -93,19 +76,18 @@ class _CoordMetaData(namedtuple('CoordMetaData', __slots__ = () -class _CoordAndDims(namedtuple('CoordAndDims', - ['coord', 'dims'])): - """ +class _CoordAndDims(namedtuple("CoordAndDims", ["coord", "dims"])): + """Container for a coordinate and the associated data dimension/s. + Container for a coordinate and the associated data dimension/s spanned over a :class:`iris.cube.Cube`. - Args: - - * coord: + Parameters + ---------- + coord : A :class:`iris.coords.DimCoord` or :class:`iris.coords.AuxCoord` coordinate instance. - - * dims: + dims : A tuple of the data dimension/s spanned by the coordinate. """ @@ -113,27 +95,27 @@ class _CoordAndDims(namedtuple('CoordAndDims', __slots__ = () -class _ScalarCoordPayload(namedtuple('ScalarCoordPayload', - ['defns', 'values', 'metadata'])): - """ +class _ScalarCoordPayload( + namedtuple("ScalarCoordPayload", ["defns", "values", "metadata"]) +): + """Container for the payload. + Container for all scalar coordinate data and metadata represented within a :class:`iris.cube.Cube`. All scalar coordinate related data is sorted into ascending order of the associated coordinate definition. - Args: - - * defns: - A list of scalar coordinate definitions :class:`iris.coords.CoordDefn` + Parameters + ---------- + defns : + A list of scalar coordinate metadata :class:`iris.common.CoordMetadata` belonging to a :class:`iris.cube.Cube`. - - * values: + values : A list of scalar coordinate values belonging to a :class:`iris.cube.Cube`. Each scalar coordinate value is typically an :class:`iris.coords.Cell`. - - * metadata: + metadata : A list of :class:`_CoordMetaData` instances belonging to a :class:`iris.cube.Cube`. @@ -142,22 +124,22 @@ class _ScalarCoordPayload(namedtuple('ScalarCoordPayload', __slots__ = () -class _VectorCoordPayload(namedtuple('VectorCoordPayload', - ['dim_coords_and_dims', - 'aux_coords_and_dims'])): - """ +class _VectorCoordPayload( + namedtuple("VectorCoordPayload", ["dim_coords_and_dims", "aux_coords_and_dims"]) +): + """Container for the payload. + Container for all vector coordinate data and metadata represented within a :class:`iris.cube.Cube`. - Args: - - * dim_coords_and_dims: + Parameters + ---------- + dim_coords_and_dims : A list of :class:`_CoordAndDim` instances containing non-scalar (i.e. 
multi-valued) :class:`iris.coords.DimCoord` instances and the associated data dimension spanned by them for a :class:`iris.cube.Cube`. - - * aux_coords_and_dims: + aux_coords_and_dims : A list of :class:`_CoordAndDim` instances containing non-scalar (i.e. multi-valued) :class:`iris.coords.DimCoord` and/or :class:`iris.coords.AuxCoord` instances and the associated data @@ -168,9 +150,9 @@ class _VectorCoordPayload(namedtuple('VectorCoordPayload', __slots__ = () -class _CoordPayload(namedtuple('CoordPayload', - ['scalar', 'vector', 'factory_defns'])): - """ +class _CoordPayload(namedtuple("CoordPayload", ["scalar", "vector", "factory_defns"])): + """Container for the payload. + Container for all the scalar and vector coordinate data and metadata, and auxiliary coordinate factories represented within a :class:`iris.cube.Cube`. @@ -178,15 +160,13 @@ class _CoordPayload(namedtuple('CoordPayload', All scalar coordinate and factory related data is sorted into ascending order of the associated coordinate definition. - Args: - - * scalar: + Parameters + ---------- + scalar : A :class:`_ScalarCoordPayload` instance. - - * vector: + vector : A :class:`_VectorCoordPayload` instance. - - * factory_defns: + factory_defns : A list of :class:`_FactoryDefn` instances. """ @@ -195,11 +175,12 @@ class _CoordPayload(namedtuple('CoordPayload', def as_signature(self): """Construct and return a :class:`_CoordSignature` from the payload.""" - - return _CoordSignature(self.scalar.defns, - self.vector.dim_coords_and_dims, - self.vector.aux_coords_and_dims, - self.factory_defns) + return _CoordSignature( + self.scalar.defns, + self.vector.dim_coords_and_dims, + self.vector.aux_coords_and_dims, + self.factory_defns, + ) @staticmethod def _coords_msgs(msgs, coord_group, defns_a, defns_b): @@ -215,31 +196,38 @@ def _coords_msgs(msgs, coord_group, defns_a, defns_b): diff_defns.extend(defns_b) if diff_defns: names = sorted(set(defn.name() for defn in diff_defns)) - msgs.append('Coordinates in {} differ: {}.'.format( - coord_group, ', '.join(names))) + msgs.append( + "Coordinates in {} differ: {}.".format( + coord_group, ", ".join(names) + ) + ) else: - msgs.append('Coordinates in {} differ by dtype or class' - ' (i.e. DimCoord vs AuxCoord).'.format( - coord_group)) + msgs.append( + "Coordinates in {} differ by dtype or class" + " (i.e. DimCoord vs AuxCoord).".format(coord_group) + ) def match_signature(self, signature, error_on_mismatch): - """ + """Check if _CoordPayload matches the corresponding aspects of a _CoordSignature. + Return whether this _CoordPayload matches the corresponding aspects of a _CoordSignature. - Args: - - * signature (_CoordSignature): + Parameters + ---------- + signature : _CoordSignature The _CoordSignature to compare against. - - * error_on_mismatch (bool): + error_on_mismatch : bool If True, raise an Exception with detailed explanation. - Returns: - Boolean. True if and only if this _CoordPayload matches - the corresponding aspects `other`. + Returns + ------- + bool + True if and only if this _CoordPayload matches + the corresponding aspects `other`. 
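These private containers all follow the same lightweight pattern: subclass a namedtuple for named, immutable fields, and declare empty ``__slots__`` so instances carry no per-instance ``__dict__``. In outline, mirroring the definitions above::

    from collections import namedtuple

    class _CoordAndDims(namedtuple("CoordAndDims", ["coord", "dims"])):
        """Container for a coordinate and its associated data dimensions."""

        __slots__ = ()    # no instance __dict__: cheap, immutable records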
""" + def unzip(coords_and_dims): if coords_and_dims: coords, dims = zip(*coords_and_dims) @@ -250,27 +238,33 @@ def unzip(coords_and_dims): def dims_msgs(msgs, coord_group, dimlists_a, dimlists_b): if dimlists_a != dimlists_b: msgs.append( - 'Coordinate-to-dimension mapping differs for {}.'.format( - coord_group)) + "Coordinate-to-dimension mapping differs for {}.".format( + coord_group + ) + ) msgs = [] - self._coords_msgs(msgs, 'cube.aux_coords (scalar)', self.scalar.defns, - signature.scalar_defns) - - coord_group = 'cube.dim_coords' + self._coords_msgs( + msgs, + "cube.aux_coords (scalar)", + self.scalar.defns, + signature.scalar_defns, + ) + + coord_group = "cube.dim_coords" self_coords, self_dims = unzip(self.vector.dim_coords_and_dims) other_coords, other_dims = unzip(signature.vector_dim_coords_and_dims) self._coords_msgs(msgs, coord_group, self_coords, other_coords) dims_msgs(msgs, coord_group, self_dims, other_dims) - coord_group = 'cube.aux_coords (non-scalar)' + coord_group = "cube.aux_coords (non-scalar)" self_coords, self_dims = unzip(self.vector.aux_coords_and_dims) other_coords, other_dims = unzip(signature.vector_aux_coords_and_dims) self._coords_msgs(msgs, coord_group, self_coords, other_coords) dims_msgs(msgs, coord_group, self_dims, other_dims) if self.factory_defns != signature.factory_defns: - msgs.append('cube.aux_factories() differ') + msgs.append("cube.aux_factories() differ") match = not bool(msgs) if error_on_mismatch and not match: @@ -278,34 +272,38 @@ def dims_msgs(msgs, coord_group, dimlists_a, dimlists_b): return match -class _CoordSignature(namedtuple('CoordSignature', - ['scalar_defns', - 'vector_dim_coords_and_dims', - 'vector_aux_coords_and_dims', - 'factory_defns'])): - """ +class _CoordSignature( + namedtuple( + "CoordSignature", + [ + "scalar_defns", + "vector_dim_coords_and_dims", + "vector_aux_coords_and_dims", + "factory_defns", + ], + ) +): + """Criterion for identifying a specific type of :class:`iris.cube.Cube`. + Criterion for identifying a specific type of :class:`iris.cube.Cube` - based on its scalar and vector coorinate data and metadata, and + based on its scalar and vector coordinate data and metadata, and auxiliary coordinate factories. - Args: - - * scalar_defns: + Parameters + ---------- + scalar_defns : A list of scalar coordinate definitions sorted into ascending order. - - * vector_dim_coords_and_dims: + vector_dim_coords_and_dims : A list of :class:`_CoordAndDim` instances containing non-scalar (i.e. multi-valued) :class:`iris.coords.DimCoord` instances and the associated data dimension spanned by them for a :class:`iris.cube.Cube`. - - * vector_aux_coords_and_dims: + vector_aux_coords_and_dims : A list of :class:`_CoordAndDim` instances containing non-scalar (i.e. multi-valued) :class:`iris.coords.DimCoord` and/or :class:`iris.coords.AuxCoord` instances and the associated data dimension/s spanned by them for a :class:`iris.cube.Cube`. - - * factory_defns: + factory_defns : A list of :class:`_FactoryDefn` instances. """ @@ -313,26 +311,32 @@ class _CoordSignature(namedtuple('CoordSignature', __slots__ = () -class _CubeSignature(namedtuple('CubeSignature', - ['defn', 'data_shape', 'data_type', - 'cell_measures_and_dims'])): - """ - Criterion for identifying a specific type of :class:`iris.cube.Cube` - based on its metadata. 
- - Args: - - * defn: +class _CubeSignature( + namedtuple( + "CubeSignature", + [ + "defn", + "data_shape", + "data_type", + "cell_measures_and_dims", + "ancillary_variables_and_dims", + ], + ) +): + """Criterion for identifying specific type of :class:`iris.cube.Cube` based on its metadata. + + Parameters + ---------- + defn : A cube definition tuple. - - * data_shape: + data_shape : The data payload shape of a :class:`iris.cube.Cube`. - - * data_type: + data_type : The data payload :class:`numpy.dtype` of a :class:`iris.cube.Cube`. - - * cell_measures_and_dims: + cell_measures_and_dims : A list of cell_measures and dims for the cube. + ancillary_variables_and_dims : + A list of ancillary variables and dims for the cube. """ @@ -342,69 +346,98 @@ def _defn_msgs(self, other_defn): msgs = [] self_defn = self.defn if self_defn.standard_name != other_defn.standard_name: - msgs.append('cube.standard_name differs: {!r} != {!r}'.format( - self_defn.standard_name, other_defn.standard_name)) + msgs.append( + "cube.standard_name differs: {!r} != {!r}".format( + self_defn.standard_name, other_defn.standard_name + ) + ) if self_defn.long_name != other_defn.long_name: - msgs.append('cube.long_name differs: {!r} != {!r}'.format( - self_defn.long_name, other_defn.long_name)) + msgs.append( + "cube.long_name differs: {!r} != {!r}".format( + self_defn.long_name, other_defn.long_name + ) + ) if self_defn.var_name != other_defn.var_name: - msgs.append('cube.var_name differs: {!r} != {!r}'.format( - self_defn.var_name, other_defn.var_name)) + msgs.append( + "cube.var_name differs: {!r} != {!r}".format( + self_defn.var_name, other_defn.var_name + ) + ) if self_defn.units != other_defn.units: - msgs.append('cube.units differs: {!r} != {!r}'.format( - self_defn.units, other_defn.units)) + msgs.append( + "cube.units differs: {!r} != {!r}".format( + self_defn.units, other_defn.units + ) + ) if self_defn.attributes != other_defn.attributes: - diff_keys = (set(self_defn.attributes.keys()) ^ - set(other_defn.attributes.keys())) + attrs_1, attrs_2 = self_defn.attributes, other_defn.attributes + diff_keys = sorted( + set(attrs_1.globals) ^ set(attrs_2.globals) + | set(attrs_1.locals) ^ set(attrs_2.locals) + ) if diff_keys: - msgs.append('cube.attributes keys differ: ' + - ', '.join(repr(key) for key in diff_keys)) + msgs.append( + "cube.attributes keys differ: " + + ", ".join(repr(key) for key in diff_keys) + ) else: - diff_attrs = [repr(key) for key in self_defn.attributes - if np.all(self_defn.attributes[key] != - other_defn.attributes[key])] - diff_attrs = ', '.join(diff_attrs) + attrs_1, attrs_2 = [ + convert_splitattrs_to_pairedkeys_dict(dic) + for dic in (attrs_1, attrs_2) + ] + diff_attrs = [ + repr(key[1]) + for key in attrs_1 + if np.all(attrs_1[key] != attrs_2[key]) + ] + diff_attrs = ", ".join(sorted(diff_attrs)) msgs.append( - 'cube.attributes values differ for keys: {}'.format( - diff_attrs)) + "cube.attributes values differ for keys: {}".format(diff_attrs) + ) if self_defn.cell_methods != other_defn.cell_methods: - msgs.append('cube.cell_methods differ') + msgs.append("cube.cell_methods differ") return msgs def match(self, other, error_on_mismatch): - """ - Return whether this _CubeSignature equals another. + """Return whether this _CubeSignature equals another. 
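The attribute-mismatch reporting above hinges on a symmetric difference of the key sets; a tiny sketch of that step::

    attrs_a = {"history": "run-1", "source": "model"}
    attrs_b = {"source": "model", "comment": "test"}

    diff_keys = sorted(set(attrs_a) ^ set(attrs_b))
    print(diff_keys)    # ['comment', 'history']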
This is the first step to determine if two "cubes" (either a real Cube or a ProtoCube) can be merged, by considering: - - standard_name, long_name, var_name - - units - - attributes - - cell_methods - - shape, dtype - Args: + * standard_name, long_name, var_name + * units + * attributes + * cell_methods + * shape, dtype - * other (_CubeSignature): + Parameters + ---------- + other : _CubeSignature The _CubeSignature to compare against. - - * error_on_mismatch (bool): + error_on_mismatch : bool If True, raise a :class:`~iris.exceptions.MergeException` with a detailed explanation if the two do not match. - Returns: - Boolean. True if and only if this _CubeSignature matches `other`. + Returns + ------- + bool + True if and only if this _CubeSignature matches `other`. """ msgs = self._defn_msgs(other.defn) if self.data_shape != other.data_shape: - msg = 'cube.shape differs: {} != {}' + msg = "cube.shape differs: {} != {}" msgs.append(msg.format(self.data_shape, other.data_shape)) if self.data_type != other.data_type: - msg = 'cube data dtype differs: {} != {}' + msg = "cube data dtype differs: {} != {}" msgs.append(msg.format(self.data_type, other.data_type)) - if (self.cell_measures_and_dims != other.cell_measures_and_dims): - msgs.append('cube.cell_measures differ') + # Both cell_measures_and_dims and ancillary_variables_and_dims are + # ordered by the same method, it is therefore not possible for a + # mismatch to be caused by a difference in order. + if self.cell_measures_and_dims != other.cell_measures_and_dims: + msgs.append("cube.cell_measures differ") + if self.ancillary_variables_and_dims != other.ancillary_variables_and_dims: + msgs.append("cube.ancillary_variables differ") match = not bool(msgs) if error_on_mismatch and not match: @@ -412,21 +445,20 @@ def match(self, other, error_on_mismatch): return match -class _Skeleton(namedtuple('Skeleton', - ['scalar_values', 'data'])): - """ +class _Skeleton(namedtuple("Skeleton", ["scalar_values", "data"])): + """Basis of a source-cube. + Basis of a source-cube, containing the associated scalar coordinate values and data payload of a :class:`iris.cube.Cube`. - Args: - - * scalar_values: + Parameters + ---------- + scalar_values : A list of scalar coordinate values belonging to a :class:`iris.cube.Cube` sorted into ascending order of the associated coordinate definition. Each scalar coordinate value is typically an :class:`iris.coords.Cell`. - - * data: + data : The data payload of a :class:`iris.cube.Cube`. """ @@ -434,17 +466,14 @@ class _Skeleton(namedtuple('Skeleton', __slots__ = () -class _FactoryDefn(namedtuple('_FactoryDefn', - ['class_', 'dependency_defns'])): - """ - The information required to identify and rebuild a single AuxCoordFactory. +class _FactoryDefn(namedtuple("_FactoryDefn", ["class_", "dependency_defns"])): + """The information required to identify and rebuild a single AuxCoordFactory. - Args: - - * class_: + Parameters + ---------- + class_ : The class of the AuxCoordFactory. - - * dependency_defns: + dependency_defns : A list of pairs, where each pair contains a dependency key and its corresponding coordinate definition. Sorted on dependency key. @@ -453,19 +482,18 @@ class _FactoryDefn(namedtuple('_FactoryDefn', __slots__ = () -class _Relation(namedtuple('Relation', - ['separable', 'inseparable'])): - """ +class _Relation(namedtuple("Relation", ["separable", "inseparable"])): + """Categorisation of the candidate dimensions. 
+ Categorisation of the candidate dimensions belonging to a :class:`ProtoCube` into separable 'independent' dimensions, and inseparable dependent dimensions. - Args: - - * separable: + Parameters + ---------- + separable : A set of independent candidate dimension names. - - * inseperable: + inseparable : A set of dependent candidate dimension names. """ @@ -473,28 +501,31 @@ class _Relation(namedtuple('Relation', __slots__ = () -_COMBINATION_JOIN = '-' +_COMBINATION_JOIN = "-" def _is_combination(name): - """ + """Determine whether the candidate dimension is an 'invented' combination. + Determine whether the candidate dimension is an 'invented' combination of candidate dimensions. - Args: - - * name: + Parameters + ---------- + name : The candidate dimension. - Returns: - Boolean. + Returns + ------- + bool """ return _COMBINATION_JOIN in str(name) def build_indexes(positions): - """ + r"""Construct a mapping for each candidate dimension. + Construct a mapping for each candidate dimension that maps for each of its scalar values the set of values for each of the other candidate dimensions. @@ -527,84 +558,86 @@ def build_indexes(positions): 200: 'a': set([1]) 'b': set([10]) 300: 'a': set([2]) 'b': set([20]) - Args: - - * positions: + Parameters + ---------- + positions : A list containing a dictionary of candidate dimension key to scalar value pairs for each source-cube. - Returns: - The cross-reference dictionary for each candidate dimension. + Returns + ------- + The cross-reference dictionary for each candidate dimension. """ names = positions[0].keys() scalar_index_by_name = {name: {} for name in names} for position in positions: - for name, value in six.iteritems(position): + for name, value in position.items(): name_index_by_scalar = scalar_index_by_name[name] if value in name_index_by_scalar: value_index_by_name = name_index_by_scalar[value] for other_name in names: if other_name != name: - value_index_by_name[other_name].add( - position[other_name]) + value_index_by_name[other_name].add(position[other_name]) else: name_index_by_scalar[value] = { other_name: set((position[other_name],)) - for other_name in names if other_name != name} + for other_name in names + if other_name != name + } return scalar_index_by_name def _separable_pair(name, index): - """ - Determine whether the candidate dimension is separable. + """Determine whether the candidate dimension is separable. A candidate dimension X and Y are separable if each scalar value of X maps to the same set of scalar values of Y. - Args: - - * name1: + Parameters + ---------- + name1 : The first candidate dimension to be compared. - - * name2: + name2 : The second candidate dimension to be compared. - - * index: + index : The cross-reference dictionary for the first candidate dimension. - Returns: - Boolean. + Returns + ------- + bool """ - items = six.itervalues(index) + items = iter(index.values()) reference = next(items)[name] return all([item[name] == reference for item in items]) def _separable(name, indexes): - """ + """Determine the candidate dimensions that are separable and inseparable. + Determine the candidate dimensions that are separable and inseparable relative to the provided candidate dimension. A candidate dimension X and Y are separable if each scalar value of X maps to the same set of scalar values of Y. - Args: - - * name: + Parameters + ---------- + name : The candidate dimension that requires its separable and inseparable relationship to be determined. 
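The worked example in the ``build_indexes`` docstring can be reproduced directly (a sketch, exercising the private module)::

    from iris._merge import build_indexes

    positions = [
        {"a": 0, "b": 10, "c": 100},
        {"a": 1, "b": 10, "c": 200},
        {"a": 2, "b": 20, "c": 300},
    ]
    indexes = build_indexes(positions)
    print(indexes["b"][10]["a"])    # {0, 1}
    print(indexes["b"][10]["c"])    # {100, 200}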
- - * indexes: + indexes : The cross-reference dictionary for each candidate dimension. - Returns: + Returns + ------- + tuple A tuple containing the set of separable and inseparable candidate dimensions. @@ -623,7 +656,8 @@ def _separable(name, indexes): def derive_relation_matrix(indexes): - """ + """Construct a mapping for each candidate dimension. + Construct a mapping for each candidate dimension that specifies which of the other candidate dimensions are separable or inseparable. @@ -648,13 +682,14 @@ def derive_relation_matrix(indexes): 'c': Relation(separable=set([]), inseparable=set(['a', 'b'])) 'b': Relation(separable=set([]), inseparable=set(['a', 'c'])) - Args: - - * indexes: + Parameters + ---------- + indexes : The cross-reference dictionary for each candidate dimension. - Returns: - The relation dictionary for each candidate dimension. + Returns + ------- + The relation dictionary for each candidate dimension. """ # TODO: This takes twice as long as it could do because it doesn't @@ -665,19 +700,19 @@ def derive_relation_matrix(indexes): def derive_groups(relation_matrix): - """ - Determine all related (chained) groups of inseparable candidate dimensions. + """Determine all related (chained) groups of inseparable candidate dimensions. If candidate dimension A is inseparable for B and C, and B is inseparable from D, and E is inseparable from F. Then the groups are ABCD and EF. - Args: - - * relation_matrix: + Parameters + ---------- + relation_matrix : The relation dictionary for each candidate dimension. - Returns: - A list of all related (chained) inseparable candidate dimensions. + Returns + ------- + A list of all related (chained) inseparable candidate dimensions. """ names = set(relation_matrix) @@ -701,20 +736,18 @@ def derive_groups(relation_matrix): def _derive_separable_group(relation_matrix, group): - """ - Determine which candidate dimensions in the group are separable. + """Determine which candidate dimensions in the group are separable. - Args: - - * relation_matrix: + Parameters + ---------- + relation_matrix : The relation dictionary for each candidate dimension. - - * group: + group : A set of related (chained) inseparable candidate dimensions. - Returns: - The set of candidate dimensions within the group that are - separable. + Returns + ------- + The set of candidate dimensions within the group that are separable. """ result = set() @@ -727,34 +760,31 @@ def _derive_separable_group(relation_matrix, group): def _is_dependent(dependent, independent, positions, function_mapping=None): - """ + """Determine whether there exists a one-to-one functional relationship. + Determine whether there exists a one-to-one functional relationship between the independent candidate dimension/s and the dependent candidate dimension. - Args: - - * dependent: + Parameters + ---------- + dependent : A candidate dimension that requires to be functionally dependent on all the independent candidate dimensions. - - * independent: + independent : A list of candidate dimension/s that require to act as the independent variables in a functional relationship. - - * positions: + positions : A list containing a dictionary of candidate dimension key to scalar value pairs for each source-cube. - - Kwargs: - - * function_mapping: + function_mapping : optional A dictionary that enumerates a valid functional relationship between the dependent candidate dimension and the independent candidate dimension/s. - Returns: - Boolean. 
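Continuing the same example, the separability analysis chains together as below; the exact group ordering is an implementation detail::

    from iris._merge import build_indexes, derive_groups, derive_relation_matrix

    positions = [
        {"a": 0, "b": 10, "c": 100},
        {"a": 1, "b": 10, "c": 200},
        {"a": 2, "b": 20, "c": 300},
    ]
    indexes = build_indexes(positions)
    relation_matrix = derive_relation_matrix(indexes)
    groups = derive_groups(relation_matrix)
    print(groups)    # one chained inseparable group, e.g. [{'a', 'b', 'c'}]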
+ Returns + ------- + bool """ valid = True @@ -777,7 +807,8 @@ def _is_dependent(dependent, independent, positions, function_mapping=None): def _derive_consistent_groups(relation_matrix, separable_group): - """ + """Determine the largest combinations of candidate dimensions. + Determine the largest combinations of candidate dimensions within the separable group that are self consistently separable from one another. @@ -785,32 +816,33 @@ def _derive_consistent_groups(relation_matrix, separable_group): B and C. Then the candidate dimension group ABC is a separable consistent group if B is separable from A and C, and C is separable from A and B. - Args: - - * relation_matrix: + Parameters + ---------- + relation_matrix : The relation dictionary for each candidate dimension. - - * separable_group: + separable_group : The set of candidate dimensions that are separable. - Returns: - A list of candidate dimension groups that are consistently separable. + Returns + ------- + A list of candidate dimension groups that are consistently separable. """ result = [] for name in separable_group: - name_separable_group = relation_matrix[name].separable & \ - separable_group + name_separable_group = relation_matrix[name].separable & separable_group candidate = list(name_separable_group) + [name] valid = True for _ in range(len(name_separable_group)): candidate_separable_group = set(candidate[1:]) - if candidate_separable_group & \ - (relation_matrix[candidate[0]].separable & - separable_group) != candidate_separable_group: + if ( + candidate_separable_group + & (relation_matrix[candidate[0]].separable & separable_group) + != candidate_separable_group + ): valid = False break @@ -822,9 +854,11 @@ def _derive_consistent_groups(relation_matrix, separable_group): return result -def _build_separable_group(space, group, separable_consistent_groups, - positions, function_matrix): - """ +def _build_separable_group( + space, group, separable_consistent_groups, positions, function_matrix +): + """Update the space with the first separable consistent group. + Update the space with the first separable consistent group that satisfies a valid functional relationship with all other candidate dimensions in the group. @@ -836,28 +870,25 @@ def _build_separable_group(space, group, separable_consistent_groups, and D, and "C: None" means that this candidate dimension is independent. - Args: - - * space: + Parameters + ---------- + space : A dictionary defining for each candidate dimension its dependency on any other candidate dimensions within the space. - - * group: + group : A set of related (chained) inseparable candidate dimensions. - - * separable_consistent_groups: + separable_consistent_groups : A list of candidate dimension groups that are consistently separable. - - * positions: + positions : A list containing a dictionary of candidate dimension key to scalar value pairs for each source-cube. - - * function_matrix: + function_matrix : The function mapping dictionary for each candidate dimension that participates in a functional relationship. - Returns: - Boolean. 
+ Returns + ------- + bool """ valid = False @@ -868,8 +899,7 @@ def _build_separable_group(space, group, separable_consistent_groups, for name in dependent: function_mapping = {} - valid = _is_dependent(name, independent, positions, - function_mapping) + valid = _is_dependent(name, independent, positions, function_mapping) if not valid: break @@ -890,7 +920,8 @@ def _build_separable_group(space, group, separable_consistent_groups, def _build_inseparable_group(space, group, positions, function_matrix): - """ + """Update the space with the first valid scalar functional relationship. + Update the space with the first valid scalar functional relationship between a candidate dimension within the group and all other candidate dimensions. @@ -906,25 +937,23 @@ def _build_inseparable_group(space, group, positions, function_matrix): and all others in the group, as the group is considered inseparable in this context. - Args: - - * space: + Parameters + ---------- + space : A dictionary defining for each candidate dimension its dependency on any other candidate dimensions within the space. - - * group: + group : A set of related (chained) inseparable candidate dimensions. - - * positions: + positions : A list containing a dictionary of candidate dimension key to scalar value pairs for each source-cube. - - * function_matrix: + function_matrix : The function mapping dictionary for each candidate dimension that participates in a functional relationship. - Returns: - Boolean. + Returns + ------- + bool """ scalar = False @@ -937,8 +966,7 @@ def _build_inseparable_group(space, group, positions, function_matrix): for name in dependent: function_mapping = {} - valid = _is_dependent(name, independent, positions, - function_mapping) + valid = _is_dependent(name, independent, positions, function_mapping) if not valid: break @@ -959,7 +987,8 @@ def _build_inseparable_group(space, group, positions, function_matrix): def _build_combination_group(space, group, positions, function_matrix): - """ + """Update the space with the new combined or invented dimension. + Update the space with the new combined or invented dimension that each member of this inseparable group depends on. @@ -968,25 +997,23 @@ def _build_combination_group(space, group, positions, function_matrix): coordinate associated with it. Rather, it is simply an enumeration of the group members for each of the positions (source-cubes). - Args: - - * space: + Parameters + ---------- + space : A dictionary defining for each candidate dimension its dependency on any other candidate dimensions within the space. - - * group: + group : A set of related (chained) inseparable candidate dimensions. - - * positions: + positions : A list containing a dictionary of candidate dimension key to scalar value pairs for each source-cube. - - * function_matrix: + function_matrix : The function mapping dictionary for each candidate dimension that participates in a functional relationship. - Returns: - None. + Returns + ------- + None """ combination = _COMBINATION_JOIN.join(sorted(map(str, group))) @@ -1001,35 +1028,37 @@ def _build_combination_group(space, group, positions, function_matrix): for position in positions: # Note, the cell double-tuple! This ensures that the cell value for # each member of the group is kept bound together as one key. 
- cell = (tuple([position[int(member) if member.isdigit() else member] - for member in members]),) + cell = ( + tuple( + [ + position[int(member) if member.isdigit() else member] + for member in members + ] + ), + ) for name in group: function_matrix[name][cell] = position[name] def derive_space(groups, relation_matrix, positions, function_matrix=None): - """ - Determine the relationship between all the candidate dimensions. - - Args: - * groups: - A list of all related (chained) inseparable candidate dimensions. - - * relation_matrix: - The relation dictionary for each candidate dimension. - - * positions: - A list containing a dictionary of candidate dimension key to - scalar value pairs for each source-cube. + """Determine the relationship between all the candidate dimensions. - Kwargs: - * function_matrix: + Parameters + ---------- + groups : + A list of all related (chained) inseparable candidate dimensions. + relation_matrix : + The relation dictionary for each candidate dimension. + positions : + A list containing a dictionary of candidate dimension key to + scalar value pairs for each source-cube. + function_matrix : optional The function mapping dictionary for each candidate dimension that participates in a functional relationship. - Returns: - A space dictionary describing the relationship between each - candidate dimension. + Returns + ------- + A space dictionary describing the relationship between each candidate dimension. """ space = {} @@ -1045,47 +1074,46 @@ def derive_space(groups, relation_matrix, positions, function_matrix=None): elif separable_group: # Determine the largest combination of the candidate dimensions # in the separable group that are consistently separable. - consistent_groups = _derive_consistent_groups(relation_matrix, - separable_group) - if not _build_separable_group(space, group, consistent_groups, - positions, function_matrix): + consistent_groups = _derive_consistent_groups( + relation_matrix, separable_group + ) + if not _build_separable_group( + space, group, consistent_groups, positions, function_matrix + ): # There is no relationship between any of the candidate # dimensions in the separable group, so merge them together # into a new combined dimension of the space. - _build_combination_group(space, group, - positions, function_matrix) + _build_combination_group(space, group, positions, function_matrix) else: # Determine whether there is a scalar relationship between one of # the candidate dimensions and each of the other candidate # dimensions in this inseparable group. - if not _build_inseparable_group(space, group, - positions, function_matrix): + if not _build_inseparable_group(space, group, positions, function_matrix): # There is no relationship between any of the candidate # dimensions in this inseparable group, so merge them together # into a new combined dimension of the space. - _build_combination_group(space, group, - positions, function_matrix) + _build_combination_group(space, group, positions, function_matrix) return space -class ProtoCube(object): - """ - Framework for merging source-cubes into one or more higher - dimensional cubes. - - """ +class ProtoCube: + """Framework for merging source-cubes into one or more higher dimensional cubes.""" def __init__(self, cube): - """ - Create a new ProtoCube from the given cube and record the cube - as a source-cube. + """Create a new ProtoCube from the given cube. - """ + Create a new ProtoCube from the given cube and record the cube as a + source-cube. 
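The "cell double-tuple" remark above is easy to miss; a small sketch of the keying scheme (with hypothetical candidate-dimension names and values) shows why the outer 1-tuple matters: it binds the values of all group members into a single hashable key, rather than one key per member:

    # One source-cube position, mapping candidate dimensions to scalars.
    position = {"forecast_period": 6, "realization": 3}
    members = sorted(position)

    # The outer 1-tuple keeps (6, 3) together as one function-matrix key.
    cell = (tuple(position[member] for member in members),)
    function_matrix = {name: {} for name in members}
    for name in members:
        function_matrix[name][cell] = position[name]
    print(function_matrix)
    # {'forecast_period': {((6, 3),): 6}, 'realization': {((6, 3),): 3}}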
+ """ # Default hint ordering for candidate dimension coordinates. - self._hints = ['time', 'forecast_reference_time', 'forecast_period', - 'model_level_number'] + self._hints = [ + "time", + "forecast_reference_time", + "forecast_period", + "model_level_number", + ] # The proto-cube source. self._source = cube @@ -1130,48 +1158,55 @@ def __init__(self, cube): self._vector_dim_coords_dims = [] self._vector_aux_coords_dims = [] - # cell measures are not merge candidates + # cell measures and ancillary variables are not merge candidates # they are checked and preserved through merge self._cell_measures_and_dims = cube._cell_measures_and_dims + self._ancillary_variables_and_dims = cube._ancillary_variables_and_dims def _report_duplicate(self, nd_indexes, group_by_nd_index): # Find the first offending source-cube with duplicate metadata. - index = [group_by_nd_index[nd_index][1] - for nd_index in nd_indexes - if len(group_by_nd_index[nd_index]) > 1][0] + index = [ + group_by_nd_index[nd_index][1] + for nd_index in nd_indexes + if len(group_by_nd_index[nd_index]) > 1 + ][0] name = self._cube_signature.defn.name() scalars = [] - for defn, value in zip(self._coord_signature.scalar_defns, - self._skeletons[index].scalar_values): - scalars.append('%s=%r' % (defn.name(), value)) - msg = 'Duplicate %r cube, with scalar coordinates %s' - msg = msg % (name, ', '.join(scalars)) + for defn, value in zip( + self._coord_signature.scalar_defns, + self._skeletons[index].scalar_values, + ): + scalars.append("%s=%r" % (defn.name(), value)) + msg = "Duplicate %r cube, with scalar coordinates %s" + msg = msg % (name, ", ".join(scalars)) raise iris.exceptions.DuplicateDataError(msg) def merge(self, unique=True): - """ - Returns the list of cubes resulting from merging the registered - source-cubes. + """Return the list of cubes resulting from merging the registered source-cubes. - Kwargs: - - * unique: + Parameters + ---------- + unique : bool, default=True If True, raises `iris.exceptions.DuplicateDataError` if duplicate cubes are detected. - Returns: - A :class:`iris.cube.CubeList` of merged cubes. + Returns + ------- + A :class:`iris.cube.CubeList` of merged cubes. """ - positions = [{i: v for i, v in enumerate(skeleton.scalar_values)} - for skeleton in self._skeletons] + positions = [ + {i: v for i, v in enumerate(skeleton.scalar_values)} + for skeleton in self._skeletons + ] indexes = build_indexes(positions) relation_matrix = derive_relation_matrix(indexes) groups = derive_groups(relation_matrix) function_matrix = {} - space = derive_space(groups, relation_matrix, positions, - function_matrix=function_matrix) + space = derive_space( + groups, relation_matrix, positions, function_matrix=function_matrix + ) self._define_space(space, positions, indexes, function_matrix) self._build_coordinates() @@ -1195,20 +1230,12 @@ def merge(self, unique=True): # Generate group-depth merged cubes from the source-cubes. for level in range(group_depth): - # Track the largest dtype of the data to be merged. - # Unfortunately, da.stack() is not symmetric with regards - # to dtypes. So stacking float + int yields a float, but - # stacking an int + float yields an int! We need to ensure - # that the largest dtype prevails i.e. float, in order to - # support the masked case for dask. - # Reference https://github.com/dask/dask/issues/2273. - dtype = None # Stack up all the data from all of the relevant source # cubes in a single dask "stacked" array. 
# If it turns out that all the source cubes already had # their data loaded then at the end we convert the stack back # into a plain numpy array. - stack = np.empty(self._stack_shape, 'object') + stack = np.empty(self._stack_shape, "object") all_have_data = True for nd_index in nd_indexes: # Get the data of the current existing or last known @@ -1223,21 +1250,11 @@ def merge(self, unique=True): else: data = as_lazy_data(data) stack[nd_index] = data - # Determine the largest dtype. - if dtype is None: - dtype = data.dtype - else: - dtype = np.promote_types(data.dtype, dtype) - - # Coerce to the largest dtype. - for nd_index in nd_indexes: - stack[nd_index] = stack[nd_index].astype(dtype) merged_data = multidim_lazy_stack(stack) if all_have_data: # All inputs were concrete, so turn the result back into a # normal array. - dtype = self._cube_signature.data_type merged_data = as_concrete_data(merged_data) merged_cube = self._get_cube(merged_data) merged_cubes.append(merged_cube) @@ -1245,7 +1262,8 @@ def merge(self, unique=True): return merged_cubes def register(self, cube, error_on_mismatch=False): - """ + """Add a compatible :class:`iris.cube.Cube` as a source for merging. + Add a compatible :class:`iris.cube.Cube` as a source-cube for merging under this :class:`ProtoCube`. @@ -1253,20 +1271,19 @@ def register(self, cube, error_on_mismatch=False): cube and the signature of its scalar coordinates and vector coordinates being identical to that of the ProtoCube. - Args: - - * cube: + Parameters + ---------- + cube : Candidate :class:`iris.cube.Cube` to be associated with this :class:`ProtoCube`. - - Kwargs: - - * error_on_mismatch: + error_on_mismatch : bool, default=False If True, raise an informative :class:`~iris.exceptions.MergeError` if registration fails. - Returns: - True iff the :class:`iris.cube.Cube` is compatible with + Returns + ------- + bool + True if the :class:`iris.cube.Cube` is compatible with this :class:`ProtoCube`. """ @@ -1275,16 +1292,16 @@ def register(self, cube, error_on_mismatch=False): match = cube_signature.match(other, error_on_mismatch) if match: coord_payload = self._extract_coord_payload(cube) - match = coord_payload.match_signature(self._coord_signature, - error_on_mismatch) + match = coord_payload.match_signature( + self._coord_signature, error_on_mismatch + ) if match: # Register the cube as a source-cube for this ProtoCube. self._add_cube(cube, coord_payload) return match def _guess_axis(self, name): - """ - Returns a "best guess" axis name of the candidate dimension. + """Return a "best guess" axis name of the candidate dimension. Heuristic categorisation of the candidate dimension (i.e. scalar_defn index) into either label 'T', 'Z', 'Y', 'X' @@ -1293,13 +1310,15 @@ def _guess_axis(self, name): Based on the associated scalar coordinate definition rather than the scalar coordinate itself. - Args: - - * name: + Parameters + ---------- + name : The candidate dimension. - Returns: - 'T', 'Z', 'Y', 'X', or None. + Returns + ------- + str or None + {'T', 'Z', 'Y', 'X'} or None. """ axis = None @@ -1311,36 +1330,36 @@ def _guess_axis(self, name): return axis def _define_space(self, space, positions, indexes, function_matrix): - """ + """Define space. + Given the derived :class:`ProtoCube` space, define this space in terms of its dimensionality, shape, coordinates and associated coordinate to space dimension mappings.
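The axis heuristic leans on `iris.util.guess_coord_axis` (used directly later in `_extract_coord_payload`); a quick standalone illustration of the categorisation it produces:

    import iris.coords
    import iris.util

    lat = iris.coords.AuxCoord([0.0], standard_name="latitude", units="degrees")
    time = iris.coords.AuxCoord(
        [0.0], standard_name="time", units="days since 1970-01-01"
    )
    print(iris.util.guess_coord_axis(lat))   # 'Y'
    print(iris.util.guess_coord_axis(time))  # 'T'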
- Args: - - * space: + Parameters + ---------- + space : A dictionary defining for each candidate dimension its dependency on any other candidate dimensions within the space. - - * positions: + positions : A list containing a dictionary of candidate dimension key to scalar value pairs for each source-cube. - - * indexes: + indexes : A cross-reference dictionary for each candidate dimension. - - * function_matrix: + function_matrix : The function mapping dictionary for each candidate dimension that participates in a functional relationship. """ - # Heuristic reordering of coordinate defintion indexes into + + # Heuristic reordering of coordinate definition indexes into # preferred dimension order. def axis_and_name(name): - axis_dict = {'T': 1, 'Z': 2, 'Y': 3, 'X': 4} + axis_dict = {"T": 1, "Z": 2, "Y": 3, "X": 4} axis_index = axis_dict.get(self._guess_axis(name), 0) # The middle element ensures sorting is the same as Python 2. - return (axis_index, not isinstance(name, six.integer_types), name) + return (axis_index, not isinstance(name, int), name) + names = sorted(space, key=axis_and_name) dim_by_name = {} @@ -1361,55 +1380,71 @@ def axis_and_name(name): # are used to retain the ordering of source cubes but to # remove any duplicate tuples. cells = OrderedDict( - (tuple(position[int(member) if member.isdigit() else - member] for member in members), None) - for position in positions).keys() + ( + tuple( + position[int(member) if member.isdigit() else member] + for member in members + ), + None, + ) + for position in positions + ).keys() dim_by_name[name] = len(self._shape) self._nd_names.append(name) self._shape.append(len(cells)) self._stack_shape.append(len(cells)) - self._cache_by_name[name] = {cell: index for index, cell - in enumerate(cells)} + self._cache_by_name[name] = { + cell: index for index, cell in enumerate(cells) + } else: # TODO: Consider appropriate sort order (ascending, - # decending) i.e. use CF positive attribute. + # descending) i.e. use CF positive attribute. cells = sorted(indexes[name]) - points = np.array([cell.point for cell in cells], - dtype=metadata[name].points_dtype) + points = np.array( + [cell.point for cell in cells], + dtype=metadata[name].points_dtype, + ) if cells[0].bound is not None: - bounds = np.array([cell.bound for cell in cells], - dtype=metadata[name].bounds_dtype) + bounds = np.array( + [cell.bound for cell in cells], + dtype=metadata[name].bounds_dtype, + ) else: bounds = None - kwargs = dict(zip(iris.coords.CoordDefn._fields, - defns[name])) + kwargs = dict(zip(CoordMetadata._fields, defns[name])) kwargs.update(metadata[name].kwargs) def name_in_independents(): - return any(name in independents - for independents in six.itervalues(space) - if independents is not None) + return any( + name in independents + for independents in space.values() + if independents is not None + ) + if len(cells) == 1 and not name_in_independents(): # A scalar coordinate not participating in a # function dependency. self._aux_templates.append( - _Template((), points, bounds, kwargs)) + _Template((), points, bounds, kwargs) + ) else: # Dimension coordinate (or aux if the data is # string like). 
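The effect of the `axis_and_name` key is that time-like names sort first and longitude-like names last, with unrecognised names (axis index 0) ahead of them all and integer names ahead of strings. A standalone sketch, with a hypothetical `guesses` dict standing in for `self._guess_axis`:

    axis_dict = {"T": 1, "Z": 2, "Y": 3, "X": 4}
    guesses = {"time": "T", "height": "Z", "latitude": "Y", "longitude": "X"}

    def axis_and_name(name):
        axis_index = axis_dict.get(guesses.get(name), 0)
        return (axis_index, not isinstance(name, int), name)

    names = ["longitude", "latitude", "time", 0, "height"]
    print(sorted(names, key=axis_and_name))
    # [0, 'time', 'height', 'latitude', 'longitude']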
dim_by_name[name] = dim = len(self._shape) self._nd_names.append(name) - if metadata[name].points_dtype.kind in 'SU': + if metadata[name].points_dtype.kind in "SU": self._aux_templates.append( - _Template(dim, points, bounds, kwargs)) + _Template(dim, points, bounds, kwargs) + ) else: self._dim_templates.append( - _Template(dim, points, bounds, kwargs)) + _Template(dim, points, bounds, kwargs) + ) self._shape.append(len(cells)) self._stack_shape.append(len(cells)) - self._cache_by_name[name] = {cell: index - for index, cell - in enumerate(cells)} + self._cache_by_name[name] = { + cell: index for index, cell in enumerate(cells) + } # Second pass - Build the auxiliary coordinate templates for the space. for name in names: @@ -1418,8 +1453,9 @@ def name_in_independents(): # Determine if there is a function dependency. if name_independents is not None: # Calculate the auxiliary coordinate shape. - dims = tuple([dim_by_name[independent] - for independent in name_independents]) + dims = tuple( + [dim_by_name[independent] for independent in name_independents] + ) aux_shape = [self._shape[dim] for dim in dims] # Create empty points and bounds in preparation to be filled. points = np.empty(aux_shape, dtype=metadata[name].points_dtype) @@ -1431,15 +1467,14 @@ def name_in_independents(): # Populate the points and bounds based on the appropriate # function mapping. - temp = six.iteritems(function_matrix[name]) + temp = function_matrix[name].items() for function_independents, name_value in temp: # Build the index (and cache it) for the auxiliary # coordinate based on the associated independent # dimension coordinate/s. index = [] - name_function_pairs = zip(name_independents, - function_independents) + name_function_pairs = zip(name_independents, function_independents) for independent, independent_value in name_function_pairs: cache = self._cache_by_name[independent] index.append(cache[independent_value]) @@ -1451,43 +1486,54 @@ def name_in_independents(): if bounds is not None: bounds[index] = name_value.bound - kwargs = dict(zip(iris.coords.CoordDefn._fields, defns[name])) - self._aux_templates.append(_Template(dims, points, bounds, - kwargs)) + kwargs = dict(zip(CoordMetadata._fields, defns[name])) + self._aux_templates.append(_Template(dims, points, bounds, kwargs)) # Calculate the dimension mapping for each vector within the space. offset = len(self._shape) - self._vector_dim_coords_dims = \ - [tuple([dim + offset for dim in item.dims]) - for item in vector_dim_coords_and_dims] - self._vector_aux_coords_dims = \ - [tuple([dim + offset for dim in item.dims]) - for item in vector_aux_coords_and_dims] + self._vector_dim_coords_dims = [ + tuple([dim + offset for dim in item.dims]) + for item in vector_dim_coords_and_dims + ] + self._vector_aux_coords_dims = [ + tuple([dim + offset for dim in item.dims]) + for item in vector_aux_coords_and_dims + ] # Now factor in the vector payload shape. Note that, for # deferred loading, this does NOT change the shape. self._shape.extend(signature.data_shape) def _get_cube(self, data): - """ + """Generate fully constructed cube. + Return a fully constructed cube for the given data, containing all its coordinates and metadata. 
""" signature = self._cube_signature - dim_coords_and_dims = [(deepcopy(coord), dim) - for coord, dim in self._dim_coords_and_dims] - aux_coords_and_dims = [(deepcopy(coord), dims) - for coord, dims in self._aux_coords_and_dims] - kwargs = dict(zip(iris.cube.CubeMetadata._fields, signature.defn)) - - cms_and_dims = [(deepcopy(cm), dims) - for cm, dims in self._cell_measures_and_dims] - cube = iris.cube.Cube(data, - dim_coords_and_dims=dim_coords_and_dims, - aux_coords_and_dims=aux_coords_and_dims, - cell_measures_and_dims=cms_and_dims, - **kwargs) + dim_coords_and_dims = [ + (deepcopy(coord), dim) for coord, dim in self._dim_coords_and_dims + ] + aux_coords_and_dims = [ + (deepcopy(coord), dims) for coord, dims in self._aux_coords_and_dims + ] + kwargs = dict(zip(CubeMetadata._fields, signature.defn)) + + cms_and_dims = [ + (deepcopy(cm), dims) for cm, dims in self._cell_measures_and_dims + ] + avs_and_dims = [ + (deepcopy(av), dims) for av, dims in self._ancillary_variables_and_dims + ] + cube = iris.cube.Cube( + data, + dim_coords_and_dims=dim_coords_and_dims, + aux_coords_and_dims=aux_coords_and_dims, + cell_measures_and_dims=cms_and_dims, + ancillary_variables_and_dims=avs_and_dims, + **kwargs, + ) # Add on any aux coord factories. for factory_defn in self._coord_signature.factory_defns: @@ -1501,12 +1547,7 @@ def _get_cube(self, data): return cube def _nd_index(self, position): - """ - Returns the n-dimensional index of this source-cube (position), - within the merged cube. - - """ - + """Return the n-dimensional index of thr source-cube, within the merged cube.""" index = [] # Determine the index of the source-cube cell for each dimension. @@ -1515,7 +1556,8 @@ def _nd_index(self, position): members = name.split(_COMBINATION_JOIN) cell = tuple( position[int(member) if member.isdigit() else member] - for member in members) + for member in members + ) index.append(self._cache_by_name[name][cell]) else: index.append(self._cache_by_name[name][position[name]]) @@ -1523,7 +1565,8 @@ def _nd_index(self, position): return tuple(index) def _build_coordinates(self): - """ + """Build the dimension and auxiliary coordinates. + Build the dimension and auxiliary coordinates for the final merged cube given that the final dimensionality of the target merged cube is known and the associated dimension/s that each @@ -1546,17 +1589,18 @@ def _build_coordinates(self): # and if it fails make the coordinate into an auxiliary coordinate. # This will ultimately make an anonymous dimension. try: - coord = iris.coords.DimCoord(template.points, - bounds=template.bounds, - **template.kwargs) + coord = iris.coords.DimCoord( + template.points, bounds=template.bounds, **template.kwargs + ) dim_coords_and_dims.append(_CoordAndDims(coord, template.dims)) except ValueError: self._aux_templates.append(template) # There is the potential that there are still anonymous dimensions. # Get a list of the dimensions which are not anonymous at this stage. - covered_dims = [dim_coord_and_dim.dims - for dim_coord_and_dim in dim_coords_and_dims] + covered_dims = [ + dim_coord_and_dim.dims for dim_coord_and_dim in dim_coords_and_dims + ] # Build the auxiliary coordinates. for template in self._aux_templates: @@ -1564,55 +1608,59 @@ def _build_coordinates(self): # fails e.g it's non-monontic or multi-dimensional or non-numeric, # then build an AuxCoord. 
try: - coord = iris.coords.DimCoord(template.points, - bounds=template.bounds, - **template.kwargs) - if len(template.dims) == 1 and \ - template.dims[0] not in covered_dims: - dim_coords_and_dims.append( - _CoordAndDims(coord, template.dims)) + coord = iris.coords.DimCoord( + template.points, bounds=template.bounds, **template.kwargs + ) + if len(template.dims) == 1 and template.dims[0] not in covered_dims: + dim_coords_and_dims.append(_CoordAndDims(coord, template.dims)) covered_dims.append(template.dims[0]) else: - aux_coords_and_dims.append( - _CoordAndDims(coord, template.dims)) + aux_coords_and_dims.append(_CoordAndDims(coord, template.dims)) except ValueError: # kwarg not applicable to AuxCoord. - template.kwargs.pop('circular', None) - coord = iris.coords.AuxCoord(template.points, - bounds=template.bounds, - **template.kwargs) + template.kwargs.pop("circular", None) + coord = iris.coords.AuxCoord( + template.points, bounds=template.bounds, **template.kwargs + ) aux_coords_and_dims.append(_CoordAndDims(coord, template.dims)) # Mix in the vector coordinates. - for item, dims in zip(self._coord_signature.vector_dim_coords_and_dims, - self._vector_dim_coords_dims): + for item, dims in zip( + self._coord_signature.vector_dim_coords_and_dims, + self._vector_dim_coords_dims, + ): dim_coords_and_dims.append(_CoordAndDims(item.coord, dims)) - for item, dims in zip(self._coord_signature.vector_aux_coords_and_dims, - self._vector_aux_coords_dims): + for item, dims in zip( + self._coord_signature.vector_aux_coords_and_dims, + self._vector_aux_coords_dims, + ): aux_coords_and_dims.append(_CoordAndDims(item.coord, dims)) def _build_signature(self, cube): - """ - Generate the signature that defines this cube. - - Args: + """Generate the signature that defines this cube. - * cube: + Parameters + ---------- + cube : The source cube to create the cube signature from. - Returns: - The cube signature. + Returns + ------- + The cube signature. """ - - return _CubeSignature(cube.metadata, cube.shape, - cube.dtype, cube._cell_measures_and_dims) + return _CubeSignature( + cube.metadata, + cube.shape, + cube.dtype, + cube._cell_measures_and_dims, + cube._ancillary_variables_and_dims, + ) def _add_cube(self, cube, coord_payload): """Create and add the source-cube skeleton to the ProtoCube.""" - skeleton = _Skeleton(coord_payload.scalar.values, - cube.core_data()) + skeleton = _Skeleton(coord_payload.scalar.values, cube.core_data()) # Attempt to do something sensible with mixed scalar dtypes. for i, metadata in enumerate(coord_payload.scalar.metadata): if metadata.points_dtype > self._coord_metadata[i].points_dtype: @@ -1620,8 +1668,7 @@ def _add_cube(self, cube, coord_payload): self._skeletons.append(skeleton) def _extract_coord_payload(self, cube): - """ - Extract all relevant coordinate data and metadata from the cube. + """Extract all relevant coordinate data and metadata from the cube. In particular, for each scalar coordinate determine its definition, its cell (point and bound) value and all other scalar coordinate @@ -1646,29 +1693,33 @@ def _extract_coord_payload(self, cube): # Coordinate hint ordering dictionary - from most preferred to least. # Copes with duplicate hint entries, where the most preferred is king. - hint_dict = {name: i for i, name in zip(range(len(self._hints), 0, -1), - self._hints[::-1])} + hint_dict = { + name: i + for i, name in zip(range(len(self._hints), 0, -1), self._hints[::-1]) + } # Coordinate axis ordering dictionary. 
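The DimCoord-then-AuxCoord fallback above relies on `iris.coords.DimCoord` rejecting anything that cannot serve as a dimension coordinate. A self-contained illustration:

    import numpy as np
    import iris.coords

    points = np.array([0, 2, 1])  # non-monotonic, so invalid for a DimCoord
    try:
        coord = iris.coords.DimCoord(points, long_name="level")
    except ValueError:
        coord = iris.coords.AuxCoord(points, long_name="level")
    print(type(coord).__name__)  # AuxCoord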
- axis_dict = {'T': 0, 'Z': 1, 'Y': 2, 'X': 3} + axis_dict = {"T": 0, "Z": 1, "Y": 2, "X": 3} # Coordinate sort function. # NB. This makes use of two properties which don't end up in - # the CoordDefn used by scalar_defns: `coord.points.dtype` and + # the metadata used by scalar_defns: `coord.points.dtype` and # `type(coord)`. def key_func(coord): points_dtype = coord.dtype - return (not np.issubdtype(points_dtype, np.number), - not isinstance(coord, iris.coords.DimCoord), - hint_dict.get(coord.name(), len(hint_dict) + 1), - axis_dict.get(iris.util.guess_coord_axis(coord), - len(axis_dict) + 1), - coord._as_defn()) + return ( + not np.issubdtype(points_dtype, np.number), + not isinstance(coord, iris.coords.DimCoord), + hint_dict.get(coord.name(), len(hint_dict) + 1), + axis_dict.get(iris.util.guess_coord_axis(coord), len(axis_dict) + 1), + coord.metadata, + ) # Order the coordinates by hints, axis, and definition. for coord in sorted(coords, key=key_func): - if not cube.coord_dims(coord) and coord.shape == (1,): + dims = tuple(cube.coord_dims(coord)) + if not dims and coord.shape == (1,): # Extract the scalar coordinate data and metadata. - scalar_defns.append(coord._as_defn()) + scalar_defns.append(coord.metadata) # Because we know there's a single Cell in the # coordinate, it's quicker to roll our own than use # Coord.cell(). @@ -1683,33 +1734,31 @@ def key_func(coord): scalar_values.append(iris.coords.Cell(points[0], bounds)) kwargs = {} if isinstance(coord, iris.coords.DimCoord): - kwargs['circular'] = coord.circular - scalar_metadata.append(_CoordMetaData(points_dtype, - bounds_dtype, kwargs)) + kwargs["circular"] = coord.circular + scalar_metadata.append( + _CoordMetaData(points_dtype, bounds_dtype, kwargs) + ) else: # Extract the vector coordinate and metadata. if id(coord) in cube_aux_coord_ids: - vector_aux_coords_and_dims.append( - _CoordAndDims(coord, tuple(cube.coord_dims(coord)))) + vector_aux_coords_and_dims.append(_CoordAndDims(coord, dims)) else: - vector_dim_coords_and_dims.append( - _CoordAndDims(coord, tuple(cube.coord_dims(coord)))) + vector_dim_coords_and_dims.append(_CoordAndDims(coord, dims)) factory_defns = [] - for factory in sorted(cube.aux_factories, - key=lambda factory: factory._as_defn()): + for factory in sorted(cube.aux_factories, key=lambda factory: factory.metadata): dependency_defns = [] dependencies = factory.dependencies for key in sorted(dependencies): coord = dependencies[key] if coord is not None: - dependency_defns.append((key, coord._as_defn())) + dependency_defns.append((key, coord.metadata)) factory_defn = _FactoryDefn(type(factory), dependency_defns) factory_defns.append(factory_defn) - scalar = _ScalarCoordPayload(scalar_defns, scalar_values, - scalar_metadata) - vector = _VectorCoordPayload(vector_dim_coords_and_dims, - vector_aux_coords_and_dims) + scalar = _ScalarCoordPayload(scalar_defns, scalar_values, scalar_metadata) + vector = _VectorCoordPayload( + vector_dim_coords_and_dims, vector_aux_coords_and_dims + ) return _CoordPayload(scalar, vector, factory_defns) diff --git a/lib/iris/_representation/__init__.py b/lib/iris/_representation/__init__.py new file mode 100644 index 0000000000..74de095995 --- /dev/null +++ b/lib/iris/_representation/__init__.py @@ -0,0 +1,5 @@ +# Copyright Iris contributors +# +# This file is part of Iris and is released under the BSD license. +# See LICENSE in the root of the repository for full licensing details. +"""Code to make printouts and other representations (e.g. 
html) of Iris objects.""" diff --git a/lib/iris/_representation/cube_printout.py b/lib/iris/_representation/cube_printout.py new file mode 100644 index 0000000000..1e648b25f6 --- /dev/null +++ b/lib/iris/_representation/cube_printout.py @@ -0,0 +1,349 @@ +# Copyright Iris contributors +# +# This file is part of Iris and is released under the BSD license. +# See LICENSE in the root of the repository for full licensing details. +"""Provides text printouts of Iris cubes.""" + +from copy import deepcopy + +from iris._representation.cube_summary import CubeSummary + + +class Table: + """A container of text strings in rows and columns. + + A container of text strings in rows + columns, that can format its content + into a string per row, with contents in columns of fixed width. + + Supports left- or right- aligned columns, alignment being set "per row". + A column may also be set, beyond which output is printed without further + formatting, and without affecting any subsequent column widths. + This is used as a crude alternative to column spanning. + + """ + + def __init__(self, rows=None, col_widths=None): + if rows is None: + rows = [] + self.rows = [deepcopy(row) for row in rows] + self.col_widths = col_widths + + def copy(self): + return Table(self.rows, col_widths=self.col_widths) + + @property + def n_columns(self): + if self.rows: + result = len(self.rows[0].cols) + else: + result = None + return result + + class Row: + """A set of column info, plus per-row formatting controls.""" + + def __init__(self, cols, aligns, i_col_unlimited=None): + assert len(cols) == len(aligns) + self.cols = cols + self.aligns = aligns + self.i_col_unlimited = i_col_unlimited + # This col + those after do not add to width + # - a crude alternative to proper column spanning + + def add_row(self, cols, aligns, i_col_unlimited=None): + """Create a new row at the bottom. + + Parameters + ---------- + cols : list of str + Per-column content. Length must match the other rows (if any). + aligns : list of {'left', 'right'} + Per-column alignments. Length must match 'cols'. + i_col_unlimited : int, optional + Column beyond which content does not affect the column widths. + ( meaning contents will print without limit ). + + """ + n_cols = len(cols) + if len(aligns) != n_cols: + msg = f"Number of aligns ({len(aligns)})" f" != number of cols ({n_cols})" + raise ValueError(msg) + if self.n_columns is not None: + # For now, all rows must have same number of columns + if n_cols != self.n_columns: + msg = ( + f"Number of columns ({n_cols})" + f" != existing table.n_columns ({self.n_columns})" + ) + raise ValueError(msg) + row = self.Row(cols, aligns, i_col_unlimited) + self.rows.append(row) + + def set_min_column_widths(self): + """Set all column widths to minimum required for current content.""" + if self.rows: + widths = [0] * self.n_columns + for row in self.rows: + cols, lim = row.cols, row.i_col_unlimited + if lim is not None: + cols = cols[:lim] # Ignore "unlimited" columns + for i_col, col in enumerate(cols): + widths[i_col] = max(widths[i_col], len(col)) + + self.col_widths = widths + + def formatted_as_strings(self): + """Return lines formatted to the set column widths.""" + if self.col_widths is None: + # If not set, calculate minimum widths. 
+ self.set_min_column_widths() + result_lines = [] + for row in self.rows: + col_texts = [] + for col, align, width in zip(row.cols, row.aligns, self.col_widths): + if align == "left": + col_text = col.ljust(width) + elif align == "right": + col_text = col.rjust(width) + else: + msg = f'Unknown alignment "{align}" ' 'not in ("left", "right")' + raise ValueError(msg) + col_texts.append(col_text) + + row_line = " ".join(col_texts).rstrip() + result_lines.append(row_line) + return result_lines + + def __str__(self): + return "\n".join(self.formatted_as_strings()) + + +class CubePrinter: + """An object created from a cube summary. + + An object created from a + :class:`iris._representation.CubeSummary`, which provides + text printout of a :class:`iris.cube.Cube`. + + This class has no internal knowledge of :class:`iris.cube.Cube`, but only + of :class:`iris._representation.CubeSummary`. + + """ + + N_INDENT_SECTION = 4 + N_INDENT_ITEM = 4 + N_INDENT_EXTRA = 4 + + def __init__(self, cube_or_summary): + """Object that provides a printout of a cube. + + Parameters + ---------- + cube_or_summary : Cube or CubeSummary + If a cube, first create a CubeSummary from it. + + Notes + ----- + .. note:: + The CubePrinter is based on a digest of a CubeSummary, but does + not reference or store it. + + """ + # Create our internal table from the summary, to produce the printouts. + if isinstance(cube_or_summary, CubeSummary): + cube_summary = cube_or_summary + else: + cube_summary = CubeSummary(cube_or_summary) + self.table = self._ingest_summary(cube_summary) + + def _ingest_summary(self, cube_summary): + """Make a table of strings representing the cube-summary.""" + sect_indent = " " * self.N_INDENT_SECTION + item_indent = sect_indent + " " * self.N_INDENT_ITEM + item_to_extra_indent = " " * self.N_INDENT_EXTRA + extra_indent = item_indent + item_to_extra_indent + + fullheader = cube_summary.header + nameunits_string = fullheader.nameunit + dimheader = fullheader.dimension_header + cube_is_scalar = dimheader.scalar + + cube_shape = dimheader.shape # may be empty + dim_names = dimheader.dim_names # may be empty + n_dims = len(dim_names) + assert len(cube_shape) == n_dims + + # First setup the columns + # - x1 @0 column-1 content : main title; headings; elements-names + # - x1 @1 "value" content (for scalar items) + # - OR x2n @1.. (name, length) for each of n dimensions + column_header_texts = [nameunits_string] # Note extra spacer here + + if cube_is_scalar: + # We will put this in the column-1 position (replacing the dim-map) + column_header_texts.append("(scalar cube)") + else: + for dim_name, length in zip(dim_names, cube_shape): + column_header_texts.append(f"{dim_name}:") + column_header_texts.append(f"{length:d}") + + n_cols = len(column_header_texts) + + # Create a table : a (n_rows) list of (n_cols) strings + + table = Table() + + # Code for adding a row, with control options. 
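A short usage sketch of the `Table` class defined above (its whole public surface is `add_row`, `set_min_column_widths`, `formatted_as_strings` and `__str__`):

    from iris._representation.cube_printout import Table

    table = Table()
    table.add_row(["name:", "points"], aligns=["left", "right"])
    table.add_row(["air_temperature:", "240"], aligns=["left", "right"])
    # Each column is padded to its widest entry, then rows are joined.
    print(table)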
+ scalar_column_aligns = ["left"] * n_cols + vector_column_aligns = deepcopy(scalar_column_aligns) + if cube_is_scalar: + vector_column_aligns[1] = "left" + else: + vector_column_aligns[1:] = n_dims * ["right", "left"] + + def add_row(col_texts, scalar=False): + aligns = scalar_column_aligns if scalar else vector_column_aligns + i_col_unlimited = 1 if scalar else None + n_missing = n_cols - len(col_texts) + col_texts += [" "] * n_missing + table.add_row(col_texts, aligns, i_col_unlimited=i_col_unlimited) + + # Start with the header line + add_row(column_header_texts) + + # Add rows from all the vector sections + for sect in cube_summary.vector_sections.values(): + if sect.contents: + sect_name = sect.title + column_texts = [sect_indent + sect_name] + add_row(column_texts) + for vec_summary in sect.contents: + element_name = vec_summary.name + dim_chars = vec_summary.dim_chars + extra_string = vec_summary.extra + column_texts = [item_indent + element_name] + for dim_char in dim_chars: + column_texts += [dim_char, ""] + add_row(column_texts) + if extra_string: + column_texts = [extra_indent + extra_string] + add_row(column_texts) + + # Similar for scalar sections + for sect in cube_summary.scalar_sections.values(): + if sect.contents: + # Add a row for the "section title" text. + sect_name = sect.title + add_row([sect_indent + sect_name]) + + def add_scalar_row(name, value=""): + column_texts = [item_indent + name, value] + add_row(column_texts, scalar=True) + + # Add a row for each item + # NOTE: different section types need different handling + title = sect_name.lower() + if title == "scalar coordinates:": + for item in sect.contents: + add_scalar_row(item.name, item.content) + if item.extra: + add_scalar_row(item_to_extra_indent + item.extra) + elif title in ("attributes:", "cell methods:", "mesh:"): + for title, value in zip(sect.names, sect.values): + add_scalar_row(title, value) + elif title in ( + "scalar ancillary variables:", + "scalar cell measures:", + ): + # These are just strings: nothing in the 'value' column. + for name in sect.contents: + add_scalar_row(name) + else: + msg = f"Unknown section type : {type(sect)}" + raise ValueError(msg) + + return table + + @staticmethod + def _decorated_table(table, name_padding=None): + """Return a modified table with added characters in the header. + + Note: 'name_padding' sets a minimum width for the name column (#0). + + """ + # Copy the input table + extract the header + its columns. + table = table.copy() + header = table.rows[0] + cols = header.cols + + if name_padding: + # Extend header column#0 to a given minimum width. + cols[0] = cols[0].ljust(name_padding) + + # Add parentheses around the dim column texts. + # -- unless already present, e.g. "(scalar cube)". + if len(cols) > 1 and not cols[1].startswith("("): + # Add parentheses around the dim columns + cols[1] = "(" + cols[1] + cols[-1] = cols[-1] + ")" + + # Add semicolons as dim column spacers + for i_col in range(2, len(cols) - 1, 2): + cols[i_col] += ";" + + # Modify the new table to be returned, invalidate any stored widths. + header.cols = cols + table.rows[0] = header + + # Recalc widths + table.set_min_column_widths() + + return table + + def _oneline_string(self, name_padding): + """Produce a one-line summary string.""" + # Copy existing content -- just the header line. + table = Table(rows=[self.table.rows[0]]) + # Note: by excluding other columns, we get a minimum-width result. + + # Add standard decorations. 
+ table = self._decorated_table(table, name_padding=name_padding) + + # Format (with no extra spacing) --> one-line result + (oneline_result,) = table.formatted_as_strings() + return oneline_result + + def _multiline_summary(self, name_padding): + """Produce a multi-line summary string.""" + # Get a derived table with standard 'decorations' added. + table = self._decorated_table(self.table, name_padding=name_padding) + result_lines = table.formatted_as_strings() + result = "\n".join(result_lines) + return result + + def to_string(self, oneline=False, name_padding=35): + """Produce a printable summary. + + Parameters + ---------- + oneline : bool, default=False + If set, produce a one-line summary. + Default is False = produce full (multiline) summary. + name_padding : int, default=35 + The minimum width for the "name" (#0) column. + + Returns + ------- + str + + """ + if oneline: + result = self._oneline_string(name_padding) + else: + result = self._multiline_summary(name_padding) + + return result + + def __str__(self): + """Printout of self, as a full multiline string.""" + return self.to_string() diff --git a/lib/iris/_representation/cube_summary.py b/lib/iris/_representation/cube_summary.py new file mode 100644 index 0000000000..a28bfc549a --- /dev/null +++ b/lib/iris/_representation/cube_summary.py @@ -0,0 +1,402 @@ +# Copyright Iris contributors +# +# This file is part of Iris and is released under the BSD license. +# See LICENSE in the root of the repository for full licensing details. +"""Provides objects describing cube summaries.""" + +import re + +import numpy as np + +import iris._lazy_data as _lazy +from iris.common.metadata import hexdigest +import iris.util + + +class DimensionHeader: + def __init__(self, cube): + if cube.shape == (): + self.scalar = True + self.dim_names = [] + self.shape = [] + self.contents = ["scalar cube"] + else: + self.scalar = False + self.dim_names = [] + for dim in range(len(cube.shape)): + dim_coords = cube.coords(contains_dimension=dim, dim_coords=True) + if dim_coords: + self.dim_names.append(dim_coords[0].name()) + else: + self.dim_names.append("-- ") + self.shape = list(cube.shape) + self.contents = [ + name + ": %d" % dim_len + for name, dim_len in zip(self.dim_names, self.shape) + ] + + +class FullHeader: + def __init__(self, cube, name_padding=35): + self.name = cube.name() + self.unit = cube.units + self.nameunit = "{name} / ({units})".format(name=self.name, units=self.unit) + self.name_padding = name_padding + self.dimension_header = DimensionHeader(cube) + + +def string_repr(text, quote_strings=False, clip_strings=False): + """Produce a one-line printable form of a text string.""" + force_quoted = re.findall("[\n\t]", text) or quote_strings + if force_quoted: + # Replace the string with its repr (including quotes). + text = repr(text) + if clip_strings: + # First check for quotes. + # N.B. not just 'quote_strings', but also array values-as-strings + has_quotes = text[0] in "\"'" + if has_quotes: + # Strip off (and store) any outer quotes before clipping. + pre_quote, post_quote = text[0], text[-1] + text = text[1:-1] + # clipping : use 'rider' with extra space in case it ends in a '.' + text = iris.util.clip_string(text, rider=" ...") + if has_quotes: + # Replace in original quotes + text = pre_quote + text + post_quote + return text + + +def array_repr(arr): + """Produce a single-line printable repr of an array.""" + # First take whatever numpy produces.. + text = repr(arr) + # ..then reduce any multiple spaces and newlines. 
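End-to-end, `CubePrinter` is driven like this (the filename is hypothetical; any loaded cube will do):

    import iris
    from iris._representation.cube_printout import CubePrinter

    cube = iris.load_cube("my_data.nc")  # hypothetical file
    printer = CubePrinter(cube)          # builds a CubeSummary internally
    print(printer.to_string(oneline=True))  # one-line summary
    print(printer)                          # full multi-line summary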
+ text = re.sub("[ \t\n]+", " ", text) + text = string_repr(text, quote_strings=False, clip_strings=True) + return text + + +def value_repr(value, quote_strings=False, clip_strings=False): + """Produce a single-line printable version of an attribute or scalar value.""" + if hasattr(value, "dtype"): + value = array_repr(value) + elif isinstance(value, str): + value = string_repr( + value, quote_strings=quote_strings, clip_strings=clip_strings + ) + value = str(value) + return value + + +class CoordSummary: + def _summary_coord_extra(self, cube, coord): + # Returns the text needed to ensure this coordinate can be + # distinguished from all others with the same name. + extra = "" + similar_coords = cube.coords(coord.name()) + if len(similar_coords) > 1: + # Find all the attribute keys + keys = set() + for similar_coord in similar_coords: + keys.update(similar_coord.attributes.keys()) + # Look for any attributes that vary + vary = set() + attributes = {} + for key in keys: + for similar_coord in similar_coords: + if key not in similar_coord.attributes: + vary.add(key) + break + value = similar_coord.attributes[key] + # Like "if attributes.setdefault(key, value) != value:" + # ..except setdefault fails if values are numpy arrays. + if key not in attributes: + attributes[key] = value + elif hexdigest(attributes[key]) != hexdigest(value): + # NOTE: fast and array-safe comparison, as used in + # :mod:`iris.common.metadata`. + vary.add(key) + break + keys = sorted(vary & set(coord.attributes.keys())) + bits = [ + "{}={}".format( + key, value_repr(coord.attributes[key], quote_strings=True) + ) + for key in keys + ] + if bits: + extra = ", ".join(bits) + return extra + + +class VectorSummary(CoordSummary): + def __init__(self, cube, vector, iscoord): + self.name = iris.util.clip_string(vector.name()) + dims = vector.cube_dims(cube) + self.dim_chars = ["x" if dim in dims else "-" for dim in range(len(cube.shape))] + if iscoord: + extra = self._summary_coord_extra(cube, vector) + self.extra = iris.util.clip_string(extra) + else: + self.extra = "" + + +class ScalarCoordSummary(CoordSummary): + def __init__(self, cube, coord): + self.name = coord.name() + if ( + coord.units in ["1", "no_unit", "unknown"] + or coord.units.is_time_reference() + ): + self.unit = "" + else: + self.unit = " {!s}".format(coord.units) + + # Don't print values of lazy coords, as computing them could cost a lot. + safe_to_print = not _lazy.is_lazy_data(coord.core_points()) + if not safe_to_print: + # However there is a special case: If it is a *factory* coord, then those + # are generally lazy. If all the dependencies are real, then it is useful + # (and safe) to compute + print the value. + for factory in cube._aux_factories: + # Note : a factory doesn't have a ".metadata" which can be matched + # against a coord. For now, just assume that it has a 'standard_name' + # property (also not actually guaranteed), and require them to match. + if coord.standard_name == factory.standard_name: + all_deps_real = True + for dependency_coord in factory.dependencies.values(): + if ( + dependency_coord.has_lazy_points() + or dependency_coord.has_lazy_bounds() + ): + all_deps_real = False + + if all_deps_real: + safe_to_print = True + + if safe_to_print: + coord_cell = coord.cell(0) + else: + coord_cell = None + + if coord.dtype.type is np.str_: + self.string_type = True + if coord_cell is not None: + # 'lines' is value split on '\n', and _each one_ length-clipped. 
+ self.lines = [ + iris.util.clip_string(str(item)) + for item in coord_cell.point.split("\n") + ] + # 'content' contains a one-line printable version of the string, + content = string_repr(coord_cell.point) + content = iris.util.clip_string(content) + else: + content = "" + self.lines = [content] + self.point = None + self.bound = None + self.content = content + else: + self.string_type = False + self.lines = None + coord_cell_cbound = None + if coord_cell is not None: + self.point = "{!s}".format(coord_cell.point) + coord_cell_cbound = coord_cell.bound + else: + self.point = "" + + if coord_cell_cbound is not None: + self.bound = "({})".format( + ", ".join(str(val) for val in coord_cell_cbound) + ) + self.content = "{}{}, bound={}{}".format( + self.point, self.unit, self.bound, self.unit + ) + elif coord.has_bounds(): + self.bound = "+bound" + self.content = "{}{}".format(self.point, self.bound) + else: + self.bound = None + self.content = "{}{}".format(self.point, self.unit) + extra = self._summary_coord_extra(cube, coord) + self.extra = iris.util.clip_string(extra) + + +class Section: + def is_empty(self): + return self.contents == [] + + +class VectorSection(Section): + def __init__(self, title, cube, vectors, iscoord): + self.title = title + self.contents = [VectorSummary(cube, vector, iscoord) for vector in vectors] + + +class ScalarCoordSection(Section): + def __init__(self, title, cube, scalars): + self.title = title + self.contents = [ScalarCoordSummary(cube, scalar) for scalar in scalars] + + +class ScalarCellMeasureSection(Section): + def __init__(self, title, cell_measures): + self.title = title + self.contents = [cm.name() for cm in cell_measures] + + +class ScalarAncillaryVariableSection(Section): + def __init__(self, title, ancillary_variables): + self.title = title + self.contents = [av.name() for av in ancillary_variables] + + +class AttributeSection(Section): + def __init__(self, title, attributes): + self.title = title + self.names = [] + self.values = [] + self.contents = [] + for name, value in sorted(attributes.items()): + value = value_repr(value, quote_strings=True, clip_strings=True) + self.names.append(name) + self.values.append(value) + content = "{}: {}".format(name, value) + self.contents.append(content) + + +class ScalarMeshSection(AttributeSection): + # This happens to behave just like an attribute sections, but it + # initialises direct from the cube. + def __init__(self, title, cube): + self.title = title + self.names = [] + self.values = [] + self.contents = [] + if cube.mesh is not None: + self.names.extend(["name", "location"]) + self.values.extend([cube.mesh.name(), cube.location]) + self.contents.extend( + [ + "{}: {}".format(name, value) + for name, value in zip(self.names, self.values) + ] + ) + + +class CellMethodSection(Section): + def __init__(self, title, cell_methods): + self.title = title + self.names = [] + self.values = [] + self.contents = [] + for index, method in enumerate(cell_methods): + value = str(method) + self.names.append(str(index)) + self.values.append(value) + content = "{}: {}".format(index, value) + self.contents.append(content) + + +class CubeSummary: + """Provide a structure for output representations of an Iris cube.""" + + def __init__(self, cube, name_padding=35): + self.header = FullHeader(cube, name_padding) + + # Cache the derived coords so we can rely on consistent + # object IDs. + derived_coords = cube.derived_coords + # Determine the cube coordinates that are scalar (single-valued) + # AND non-dimensioned. 
+ dim_coords = cube.dim_coords + aux_coords = cube.aux_coords + all_coords = dim_coords + aux_coords + derived_coords + scalar_coords = [ + coord + for coord in all_coords + if not cube.coord_dims(coord) and coord.shape == (1,) + ] + # Determine the cube coordinates that are not scalar BUT + # dimensioned. + scalar_coord_ids = set(map(id, scalar_coords)) + vector_dim_coords = [ + coord for coord in dim_coords if id(coord) not in scalar_coord_ids + ] + if cube.mesh is None: + mesh_coords = [] + else: + mesh_coords = [coord for coord in aux_coords if hasattr(coord, "mesh")] + + vector_aux_coords = [ + coord + for coord in aux_coords + if (id(coord) not in scalar_coord_ids and coord not in mesh_coords) + ] + vector_derived_coords = [ + coord for coord in derived_coords if id(coord) not in scalar_coord_ids + ] + + # Ancillary Variables + vector_ancillary_variables = [] + scalar_ancillary_variables = [] + for av, av_dims in cube._ancillary_variables_and_dims: + if av_dims: + vector_ancillary_variables.append(av) + else: + scalar_ancillary_variables.append(av) + + # Cell Measures + vector_cell_measures = [] + scalar_cell_measures = [] + for cm, cm_dims in cube._cell_measures_and_dims: + if cm_dims: + vector_cell_measures.append(cm) + else: + scalar_cell_measures.append(cm) + + # Sort scalar coordinates by name. + scalar_coords.sort(key=lambda coord: coord.name()) + # Sort vector coordinates by data dimension and name. + vector_dim_coords.sort(key=lambda coord: (cube.coord_dims(coord), coord.name())) + vector_aux_coords.sort(key=lambda coord: (cube.coord_dims(coord), coord.name())) + vector_derived_coords.sort( + key=lambda coord: (cube.coord_dims(coord), coord.name()) + ) + + self.vector_sections = {} + + def add_vector_section(title, contents, iscoord=True): + self.vector_sections[title] = VectorSection(title, cube, contents, iscoord) + + add_vector_section("Dimension coordinates:", vector_dim_coords) + add_vector_section("Mesh coordinates:", mesh_coords) + add_vector_section("Auxiliary coordinates:", vector_aux_coords) + add_vector_section("Derived coordinates:", vector_derived_coords) + add_vector_section("Cell measures:", vector_cell_measures, False) + add_vector_section("Ancillary variables:", vector_ancillary_variables, False) + + self.scalar_sections = {} + + def add_scalar_section(section_class, title, *args): + self.scalar_sections[title] = section_class(title, *args) + + add_scalar_section(ScalarMeshSection, "Mesh:", cube) + + add_scalar_section( + ScalarCoordSection, "Scalar coordinates:", cube, scalar_coords + ) + add_scalar_section( + ScalarCellMeasureSection, + "Scalar cell measures:", + scalar_cell_measures, + ) + add_scalar_section( + ScalarAncillaryVariableSection, + "Scalar ancillary variables:", + scalar_ancillary_variables, + ) + add_scalar_section(CellMethodSection, "Cell methods:", cube.cell_methods) + add_scalar_section(AttributeSection, "Attributes:", cube.attributes) diff --git a/lib/iris/_shapefiles.py b/lib/iris/_shapefiles.py new file mode 100644 index 0000000000..74b24b6627 --- /dev/null +++ b/lib/iris/_shapefiles.py @@ -0,0 +1,243 @@ +# Copyright Iris contributors +# +# This file is part of Iris and is released under the BSD license. +# See LICENSE in the root of the repository for full licensing details. + +# Much of this code is originally based off the ASCEND library, developed in +# the Met Office by Chris Kent, Emilie Vanvyve, David Bentley, Joana Mendes +# many thanks to them. 
Converted to iris by Alex Chamberlain-Clay + + +from itertools import product +import warnings + +import numpy as np +import shapely +import shapely.errors +import shapely.geometry as sgeom +import shapely.ops + +from iris.warnings import IrisDefaultingWarning, IrisUserWarning + + +def create_shapefile_mask( + geometry, + cube, + minimum_weight=0.0, +): + """Make a mask for a cube from a shape. + + Get the mask of the intersection between the + given shapely geometry and cube with x/y DimCoords. + Can take a minimum weight and evaluate area overlaps instead. + + Parameters + ---------- + geometry : :class:`shapely.Geometry` + cube : :class:`iris.cube.Cube` + A :class:`~iris.cube.Cube` which has 1d x and y coordinates. + minimum_weight : float, default=0.0 + A float between 0 and 1 determining what % of a cell + a shape must cover for the cell to remain unmasked. + e.g. 0.1 means the shape must cover at least 10% of the cell + for the cell to remain unmasked. + Requires geometry to be a Polygon or MultiPolygon. + Defaults to 0.0 (i.e. only test intersection). + + Returns + ------- + :class:`np.array` + An array of the shape of the x & y coordinates of the cube, with points + to mask equal to True. + + """ + from iris.cube import Cube, CubeList + + try: + msg = "Geometry is not a valid Shapely object" + if not shapely.is_valid(geometry): + raise TypeError(msg) + except Exception: + raise TypeError(msg) + if not isinstance(cube, Cube): + if isinstance(cube, CubeList): + msg = "Received CubeList object rather than Cube - \ + to mask a CubeList iterate over each Cube" + raise TypeError(msg) + else: + msg = "Received non-Cube object where a Cube is expected" + raise TypeError(msg) + if minimum_weight > 0.0 and isinstance( + geometry, + ( + sgeom.Point, + sgeom.LineString, + sgeom.LinearRing, + sgeom.MultiPoint, + sgeom.MultiLineString, + ), + ): + minimum_weight = 0.0 + warnings.warn( + """Shape is of invalid type for minimum weight masking, + must use a Polygon rather than Line shape.\n + Masking based off intersection instead. """, + category=IrisDefaultingWarning, + ) + + # prepare 2D cube + y_name, x_name = _cube_primary_xy_coord_names(cube) + cube_2d = cube.slices([y_name, x_name]).next() + for coord in cube_2d.dim_coords: + if not coord.has_bounds(): + coord.guess_bounds() + trans_geo = _transform_coord_system(geometry, cube_2d) + + y_coord, x_coord = [cube_2d.coord(n) for n in (y_name, x_name)] + x_bounds = _get_mod_rebased_coord_bounds(x_coord) + y_bounds = _get_mod_rebased_coord_bounds(y_coord) + # prepare array for dask + box_template = [ + sgeom.box(x[0], y[0], x[1], y[1]) for x, y in product(x_bounds, y_bounds) + ] + # shapely can do lazy evaluation of intersections if it's given a list of grid box shapes + # delayed lets us do it in parallel + intersect_template = shapely.intersects(trans_geo, box_template) + # we want areas not under shapefile to be True (to mask) + intersect_template = np.invert(intersect_template) + # now calc area overlaps if doing weights and adjust mask + if minimum_weight > 0.0: + intersections = np.array(box_template)[~intersect_template] + intersect_template[~intersect_template] = [ + trans_geo.intersection(box).area / box.area <= minimum_weight + for box in intersections + ] + mask_template = np.reshape(intersect_template, cube_2d.shape[::-1]).T + return mask_template
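For orientation, a usage sketch of `create_shapefile_mask` (hypothetical filename; assumes a 2-d cube with 1-d x/y dimension coordinates):

    import numpy as np
    import shapely.geometry as sgeom

    import iris
    from iris._shapefiles import create_shapefile_mask

    cube = iris.load_cube("my_data.nc")           # hypothetical file
    geometry = sgeom.box(-10.0, 50.0, 2.0, 60.0)  # lon/lat bounding box

    # Mask every cell that the box covers by less than 50%.
    mask = create_shapefile_mask(geometry, cube, minimum_weight=0.5)
    masked_data = np.ma.masked_array(cube.data, mask=mask)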
+ + +def _transform_coord_system(geometry, cube, geometry_system=None): + """Project the shape onto another coordinate system. + + Parameters + ---------- + geometry : :class:`shapely.Geometry` + cube : :class:`iris.cube.Cube` + :class:`~iris.cube.Cube` with the coord_system to be projected to and + an x coordinate. + geometry_system : :class:`iris.coord_systems`, optional + A :class:`~iris.coord_systems` object describing + the coord_system of the shapefile. Defaults to None, + which is treated as GeogCS. + + Returns + ------- + :class:`shapely.Geometry` + A transformed copy of the provided :class:`shapely.Geometry`. + + """ + y_name, x_name = _cube_primary_xy_coord_names(cube) + import iris.analysis.cartography + + DEFAULT_CS = iris.coord_systems.GeogCS( + iris.analysis.cartography.DEFAULT_SPHERICAL_EARTH_RADIUS + ) + target_system = cube.coord_system() + if not target_system: + warnings.warn( + "Cube has no coord_system; using default GeogCS lat/lon", + category=IrisDefaultingWarning, + ) + target_system = DEFAULT_CS + if geometry_system is None: + geometry_system = DEFAULT_CS + target_proj = target_system.as_cartopy_projection() + source_proj = geometry_system.as_cartopy_projection() + + trans_geometry = target_proj.project_geometry(geometry, source_proj) + # A GeogCS in iris can be either -180 to 180 or 0 to 360. If cube is 0-360, shift geom to match + if ( + isinstance(target_system, iris.coord_systems.GeogCS) + and cube.coord(x_name).points[-1] > 180 + ): + # chop geom at 0 degree line very finely then transform + prime_meridian_line = shapely.LineString([(0, 90), (0, -90)]) + trans_geometry = trans_geometry.difference(prime_meridian_line.buffer(0.00001)) + trans_geometry = shapely.transform(trans_geometry, _trans_func) + + if (not isinstance(target_system, iris.coord_systems.GeogCS)) and cube.coord( + x_name + ).points[-1] > 180: + # this may lead to incorrect masking or not depending on projection type so warn user + warnings.warn( + """Cube has x-coordinates over 180E and a non-standard projection type.\n + This may lead to incorrect masking. \n + If the result is not as expected, you might want to transform the x coordinate points of your cube to -180-180 """, + category=IrisUserWarning, + ) + return trans_geometry + + +def _trans_func(geometry): + """Pocket function for transforming the x coord of a geometry from -180 to 180 to 0-360.""" + for point in geometry: + if point[0] < 0: + point[0] = 360 - np.abs(point[0]) + return geometry + + +def _cube_primary_xy_coord_names(cube): + """Return the primary latitude and longitude coordinate names, or long names, from a cube. + + Parameters + ---------- + cube : :class:`iris.cube.Cube` + + Returns + ------- + tuple of str + The names of the primary latitude and longitude coordinates. + + """ + latc = ( + cube.coords(axis="y", dim_coords=True)[0] + if cube.coords(axis="y", dim_coords=True) + else -1 + ) + lonc = ( + cube.coords(axis="x", dim_coords=True)[0] + if cube.coords(axis="x", dim_coords=True) + else -1 + ) + + if -1 in (latc, lonc): + msg = "Error retrieving 1d xy coordinates in cube: {!r}" + raise ValueError(msg.format(cube)) + + latitude = latc.name() + longitude = lonc.name() + return latitude, longitude + + +def _get_mod_rebased_coord_bounds(coord): + """Take in a coord and return an array of the bounds of that coord rebased to the modulus. + + Parameters + ---------- + coord : :class:`iris.coords.Coord` + An Iris coordinate with a modulus. + + Returns + ------- + :class:`np.array` + A 1d Numpy array of [start,end] pairs for bounds of the coord.
+
+
+def _cube_primary_xy_coord_names(cube):
+    """Return the primary latitude and longitude coordinate names, or long names, from a cube.
+
+    Parameters
+    ----------
+    cube : :class:`iris.cube.Cube`
+
+    Returns
+    -------
+    tuple of str
+        The names of the primary latitude and longitude coordinates.
+
+    """
+    latc = (
+        cube.coords(axis="y", dim_coords=True)[0]
+        if cube.coords(axis="y", dim_coords=True)
+        else -1
+    )
+    lonc = (
+        cube.coords(axis="x", dim_coords=True)[0]
+        if cube.coords(axis="x", dim_coords=True)
+        else -1
+    )
+
+    if -1 in (latc, lonc):
+        msg = "Error retrieving 1d xy coordinates in cube: {!r}"
+        raise ValueError(msg.format(cube))
+
+    latitude = latc.name()
+    longitude = lonc.name()
+    return latitude, longitude
+
+
+def _get_mod_rebased_coord_bounds(coord):
+    """Take a coord and return an array of its bounds rebased to the coord's modulus.
+
+    Parameters
+    ----------
+    coord : :class:`iris.coords.Coord`
+        An Iris coordinate with a modulus.
+
+    Returns
+    -------
+    :class:`np.array`
+        A 1d Numpy array of [start,end] pairs for bounds of the coord.
+
+    """
+    modulus = coord.units.modulus
+    # Force realisation (rather than core_bounds) - more efficient for the
+    # repeated indexing happening downstream.
+    result = np.array(coord.bounds)
+    if modulus:
+        result[result < 0.0] = (np.abs(result[result < 0.0]) % modulus) * -1
+        result[np.isclose(result, modulus, 1e-10)] = 0.0
+    return result
diff --git a/lib/iris/analysis/__init__.py b/lib/iris/analysis/__init__.py
index 9613550929..215d6dff0a 100644
--- a/lib/iris/analysis/__init__.py
+++ b/lib/iris/analysis/__init__.py
@@ -1,21 +1,8 @@
-# (C) British Crown Copyright 2010 - 2019, Met Office
+# Copyright Iris contributors
 #
-# This file is part of Iris.
-#
-# Iris is free software: you can redistribute it and/or modify it under
-# the terms of the GNU Lesser General Public License as published by the
-# Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Iris is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with Iris.  If not, see <http://www.gnu.org/licenses/>.
-"""
-A package providing :class:`iris.cube.Cube` analysis support.
+# This file is part of Iris and is released under the BSD license.
+# See LICENSE in the root of the repository for full licensing details.
+"""A package providing :class:`iris.cube.Cube` analysis support.
 
 This module defines a suite of :class:`~iris.analysis.Aggregator` instances,
 which are used to specify the statistical measure to calculate over a
@@ -38,52 +25,80 @@
 The gallery contains several interesting worked examples of how an
 :class:`~iris.analysis.Aggregator` may be used, including:
 
-    * :ref:`Meteorology-COP_1d_plot`
-    * :ref:`General-SOI_filtering`
-    * :ref:`Meteorology-hovmoller`
-    * :ref:`Meteorology-lagged_ensemble`
-    * :ref:`General-custom_aggregation`
+* :ref:`sphx_glr_generated_gallery_meteorology_plot_COP_1d.py`
+* :ref:`sphx_glr_generated_gallery_general_plot_SOI_filtering.py`
+* :ref:`sphx_glr_generated_gallery_meteorology_plot_hovmoller.py`
+* :ref:`sphx_glr_generated_gallery_meteorology_plot_lagged_ensemble.py`
+* :ref:`sphx_glr_generated_gallery_general_plot_custom_aggregation.py`
 
 """
 
-from __future__ import (absolute_import, division, print_function)
-from six.moves import (filter, input, map, range, zip)  # noqa
-import six
+from __future__ import annotations
 
-from collections import OrderedDict
-try:  # Python 3
-    from collections.abc import Iterable
-except ImportError:  # Python 2.7
-    from collections import Iterable
+from collections.abc import Iterable, Sequence
+import functools
 from functools import wraps
+from inspect import getfullargspec
+import itertools
+from numbers import Number
+from typing import Optional, Protocol, Union
+import warnings
 
+from cf_units import Unit
 import dask.array as da
 import numpy as np
 import numpy.ma as ma
 import scipy.interpolate
 import scipy.stats.mstats
 
+import iris._lazy_data
 from iris.analysis._area_weighted import AreaWeightedRegridder
-from iris.analysis._interpolation import (EXTRAPOLATION_MODES,
-                                          RectilinearInterpolator)
-from iris.analysis._regrid import RectilinearRegridder, CurvilinearRegridder
+from iris.analysis._interpolation import EXTRAPOLATION_MODES, RectilinearInterpolator
+from iris.analysis._regrid import CurvilinearRegridder, RectilinearRegridder
 import iris.coords
+from iris.coords import AuxCoord, DimCoord, _DimensionalMetadata
 from iris.exceptions import LazyAggregatorError
-import iris._lazy_data
-
-__all__ = ('COUNT', 'GMEAN', 'HMEAN', 'MAX', 'MEAN', 'MEDIAN', 'MIN',
-           'PEAK', 'PERCENTILE', 'PROPORTION', 'RMS', 'STD_DEV', 'SUM',
-           'VARIANCE', 'WPERCENTILE', 'coord_comparison', 'Aggregator',
-           'WeightedAggregator', 'clear_phenomenon_identity', 'Linear',
-           'AreaWeighted', 'Nearest', 'UnstructuredNearest')
+import iris.util
+
+__all__ = (
+    "Aggregator",
+    "AreaWeighted",
+    "COUNT",
+    "GMEAN",
+    "HMEAN",
+    "Linear",
+    "MAX",
+    "MAX_RUN",
+    "MEAN",
+    "MEDIAN",
+    "MIN",
+    "Nearest",
+    "PEAK",
+    "PERCENTILE",
+    "PROPORTION",
+    "PercentileAggregator",
+    "PointInCell",
+    "RMS",
+    "STD_DEV",
+    "SUM",
+    "UnstructuredNearest",
+    "VARIANCE",
+    "WPERCENTILE",
+    "WeightedAggregator",
+    "WeightedPercentileAggregator",
+    "clear_phenomenon_identity",
+    "create_weighted_aggregator_fn",
+)
+
+
+class _CoordGroup:
+    """Represents a list of coordinates, one for each given cube.
 
-
-class _CoordGroup(object):
-    """
-    Represents a list of coordinates, one for each given cube.
     It can be operated on conveniently.
 
     """
+
     def __init__(self, coords, cubes):
         self.coords = coords
         self.cubes = cubes
@@ -95,25 +110,35 @@
     def __getitem__(self, key):
         return list(self).__getitem__(key)
 
     def _first_coord_w_cube(self):
-        """
+        """Return the first non-None coordinate.
+
         Return the first non-None coordinate, and its associated cube, as
         (cube, coord).
 
         """
-        return next(filter(lambda cube_coord: cube_coord[1] is not None,
-                           zip(self.cubes, self.coords)))
+        return next(
+            filter(
+                lambda cube_coord: cube_coord[1] is not None,
+                zip(self.cubes, self.coords),
+            )
+        )
 
     def __repr__(self):
         # No exact repr, so a helpful string is given instead
-        return '[' + ', '.join([coord.name() if coord is not None
-                                else 'None' for coord in self]) + ']'
+        return (
+            "["
+            + ", ".join(
+                [coord.name() if coord is not None else "None" for coord in self]
+            )
+            + "]"
+        )
 
     def name(self):
         _, first_coord = self._first_coord_w_cube()
         return first_coord.name()
 
     def _oid_tuple(self):
-        """Return a tuple of object ids for this _CoordGroup's coordinates"""
+        """Return a tuple of object ids for this _CoordGroup's coordinates."""
         return tuple((id(coord) for coord in self))
 
     def __hash__(self):
@@ -132,7 +157,8 @@ def __eq__(self, other):
         return result
 
     def matches(self, predicate, default_val=True):
-        """
+        """Apply a function to a coord group returning a list of bools.
+
         Apply a function to a coord group returning a list of bools
         for each coordinate.
 
@@ -149,7 +175,8 @@ def matches(self, predicate, default_val=True):
         yield predicate(cube, coord)
 
     def matches_all(self, predicate):
-        """
+        """Return whether all coordinates match the given function.
+
         Return whether all coordinates match the given function
         after running it through :meth:`matches`.
 
@@ -159,7 +186,8 @@ def matches_all(self, predicate):
         return all(self.matches(predicate))
 
     def matches_any(self, predicate):
-        """
+        """Return whether any coordinates match the given function.
+
         Return whether any coordinates match the given function
         after running it through :meth:`matches`.
 
@@ -169,61 +197,103 @@ def matches_any(self, predicate):
         return any(self.matches(predicate))
 
 
-def coord_comparison(*cubes):
-    """
-    Convenience function to help compare coordinates on one or more cubes
-    by their metadata.
-
-    Return a dictionary where the key represents the statement,
-    "Given these cubes list the coordinates which,
-    when grouped by metadata, are/have..."
- - Keys: - - * grouped_coords - A list of coordinate groups of all the coordinates grouped together - by their coordinate definition - * ungroupable - A list of coordinate groups which contain at least one None, - meaning not all Cubes provide an equivalent coordinate - * not_equal - A list of coordinate groups of which not all are equal - (superset of ungroupable) - * no_data_dimension - A list of coordinate groups of which all have no data dimensions on - their respective cubes - * scalar - A list of coordinate groups of which all have shape (1, ) - * non_equal_data_dimension - A list of coordinate groups of which not all have the same - data dimension on their respective cubes - * non_equal_shape - A list of coordinate groups of which not all have the same shape - * equal_data_dimension - A list of coordinate groups of which all have the same data dimension - on their respective cubes - * equal - A list of coordinate groups of which all are equal - * ungroupable_and_dimensioned - A list of coordinate groups of which not all cubes had an equivalent - (in metadata) coordinate which also describe a data dimension - * dimensioned - A list of coordinate groups of which all describe a data dimension on - their respective cubes - * ignorable - A list of scalar, ungroupable non_equal coordinate groups - * resamplable - A list of equal, different data dimensioned coordinate groups - * transposable - A list of non equal, same data dimensioned, non scalar coordinate groups - - Example usage:: - - result = coord_comparison(cube1, cube2) +def _dimensional_metadata_comparison(*cubes, object_get=None): + """Help compare coordinates. + + Convenience function to help compare coordinates, cell-measures or + ancillary-variables, on one or more cubes, by their metadata. + + .. note:: + + Up to Iris 2.x, this _used_ to be the public API method + "iris.analysis.coord_comparison". + It has since been generalised, and made private. + However, the cube elements handled are still mostly referred to as 'coords' / + 'coordinates' throughout, for simplicity : In fact, they will all be either + `iris.coords.Coord`, `iris.coords.CellMeasure` or + `iris.coords.AncillaryVariable`, the cube element type being controlled by the + 'object_get' keyword. + + Parameters + ---------- + cubes : iterable of `iris.cube.Cube` + A set of cubes whose coordinates, cell-measures or ancillary-variables are to + be compared. + object_get : callable(cube) or None, optional + If not None, this must be a cube method returning a list of all cube elements + of the required type, i.e. one of `iris.cube.Cube.coords`, + `iris.cube.Cube.cell_measures`, or `iris.cube.Cube.ancillary_variables`. + If not specified, defaults to `iris.cube.Cube.coords`. + + Returns + ------- + (dict mapping str, list of _CoordGroup) + A dictionary whose keys are match categories and values are groups of + coordinates, cell-measures or ancillary-variables. + + The values of the returned dictionary are lists of _CoordGroup representing + grouped coordinates. Each _CoordGroup contains all the input 'cubes', and a + matching list of the coord within each cube that matches some specific CoordDefn + (or maybe None). + + The keys of the returned dictionary are strings naming 'categories' : Each + represents a statement, + "Given these cubes list the coordinates which, + when grouped by metadata, are/have..." + + Returned Keys: + + * **grouped_coords**. 
+        A list of coordinate groups of all the coordinates grouped together
+        by their coordinate definition
+    * **ungroupable**.
+        A list of coordinate groups which contain at least one None,
+        meaning not all Cubes provide an equivalent coordinate
+    * **not_equal**.
+        A list of coordinate groups of which not all are equal
+        (superset of ungroupable)
+    * **no_data_dimension**.
+        A list of coordinate groups of which all have no data dimensions on
+        their respective cubes
+    * **scalar**.
+        A list of coordinate groups of which all have shape (1, )
+    * **non_equal_data_dimension**.
+        A list of coordinate groups of which not all have the same
+        data dimension on their respective cubes
+    * **non_equal_shape**.
+        A list of coordinate groups of which not all have the same shape
+    * **equal_data_dimension**.
+        A list of coordinate groups of which all have the same data dimension
+        on their respective cubes
+    * **equal**.
+        A list of coordinate groups of which all are equal
+    * **ungroupable_and_dimensioned**.
+        A list of coordinate groups of which not all cubes had an equivalent
+        (in metadata) coordinate which also describes a data dimension
+    * **dimensioned**.
+        A list of coordinate groups of which all describe a data dimension on
+        their respective cubes
+    * **ignorable**.
+        A list of scalar, ungroupable non_equal coordinate groups
+    * **resamplable**.
+        A list of equal, different data dimensioned coordinate groups
+    * **transposable**.
+        A list of non equal, same data dimensioned, non scalar coordinate groups
+
+    Examples
+    --------
+    ::
+
+        result = _dimensional_metadata_comparison(cube1, cube2)
         print('All equal coordinates: ', result['equal'])
 
     """
-    all_coords = [cube.coords() for cube in cubes]
+    if object_get is None:
+        from iris.cube import Cube
+
+        object_get = Cube.coords
+
+    all_coords = [object_get(cube) for cube in cubes]
     grouped_coords = []
 
     # set of coordinates id()s of coordinates which have been processed
@@ -233,7 +303,6 @@
     # for coordinate groups
     for cube, coords in zip(cubes, all_coords):
         for coord in coords:
-
             # if this coordinate has already been processed, then continue on
             # to the next one
             if id(coord) in processed_coords:
@@ -257,9 +326,11 @@
                 for other_coord in all_coords[other_cube_i]:
                     # for optimisation, check that the name is equivalent
                     # *before* checking all of the metadata is equivalent
-                    eq = (other_coord is coord or
-                          other_coord.name() == coord.name() and
-                          other_coord._as_defn() == coord._as_defn())
+                    eq = (
+                        other_coord is coord
+                        or other_coord.name() == coord.name()
+                        and other_coord.metadata == coord.metadata
+                    )
                     if eq:
                         coord_to_add_to_group = other_coord
                         break
@@ -315,7 +386,7 @@ def diff_shape_fn(cube, coord):
         # dimension on their respective cubes
         # (None -> group describes a different dimension)
         def diff_data_dim_fn(cube, coord):
-            return cube.coord_dims(coord) != first_cube.coord_dims(first_coord)
+            return coord.cube_dims(cube) != first_coord.cube_dims(first_cube)
 
         if coord_group.matches_any(diff_data_dim_fn):
             different_data_dimension.add(coord_group)
@@ -323,7 +394,7 @@ def diff_data_dim_fn(cube, coord):
         # get all coordinate groups which don't describe a dimension
         # (None -> doesn't describe a dimension)
         def no_data_dim_fn(cube, coord):
-            return cube.coord_dims(coord) == ()
+            return coord.cube_dims(cube) == ()
 
         if coord_group.matches_all(no_data_dim_fn):
            no_data_dimension.add(coord_group)
 
@@ -331,95 +402,89 @@ def no_data_dim_fn(cube, coord):
        # get all coordinate groups which don't describe a dimension
# (None -> not a scalar coordinate) def no_data_dim_fn(cube, coord): - return coord.shape == (1, ) + return coord.shape == (1,) if coord_group.matches_all(no_data_dim_fn): scalar_coords.add(coord_group) result = {} - result['grouped_coords'] = set(grouped_coords) - result['not_equal'] = not_equal - result['ungroupable'] = ungroupable - result['no_data_dimension'] = no_data_dimension - result['scalar'] = scalar_coords - result['non_equal_data_dimension'] = different_data_dimension - result['non_equal_shape'] = different_shaped_coords - - result['equal_data_dimension'] = (result['grouped_coords'] - - result['non_equal_data_dimension']) - result['equal'] = result['grouped_coords'] - result['not_equal'] - result['dimensioned'] = (result['grouped_coords'] - - result['no_data_dimension']) - result['ungroupable_and_dimensioned'] = (result['ungroupable'] & - result['dimensioned']) - result['ignorable'] = ((result['not_equal'] | result['ungroupable']) & - result['no_data_dimension']) - result['resamplable'] = (result['not_equal'] & - result['equal_data_dimension'] - result['scalar']) - result['transposable'] = (result['equal'] & - result['non_equal_data_dimension']) + result["grouped_coords"] = set(grouped_coords) + result["not_equal"] = not_equal + result["ungroupable"] = ungroupable + result["no_data_dimension"] = no_data_dimension + result["scalar"] = scalar_coords + result["non_equal_data_dimension"] = different_data_dimension + result["non_equal_shape"] = different_shaped_coords + + result["equal_data_dimension"] = ( + result["grouped_coords"] - result["non_equal_data_dimension"] + ) + result["equal"] = result["grouped_coords"] - result["not_equal"] + result["dimensioned"] = result["grouped_coords"] - result["no_data_dimension"] + result["ungroupable_and_dimensioned"] = ( + result["ungroupable"] & result["dimensioned"] + ) + result["ignorable"] = (result["not_equal"] | result["ungroupable"]) & result[ + "no_data_dimension" + ] + result["resamplable"] = ( + result["not_equal"] & result["equal_data_dimension"] - result["scalar"] + ) + result["transposable"] = result["equal"] & result["non_equal_data_dimension"] # for convenience, turn all of the sets in the dictionary into lists, # sorted by the name of the group - for key, groups in six.iteritems(result): + for key, groups in result.items(): result[key] = sorted(groups, key=lambda group: group.name()) return result -class _Aggregator(object): - """ - The :class:`_Aggregator` base class provides common aggregation - functionality. +class _Aggregator: + """Base class provides common aggregation functionality.""" - """ - def __init__(self, cell_method, call_func, units_func=None, - lazy_func=None, **kwargs): - """ - Create an aggregator for the given :data:`call_func`. + def __init__( + self, cell_method, call_func, units_func=None, lazy_func=None, **kwargs + ): + r"""Create an aggregator for the given :data:`call_func`. - Args: + Aggregators are used by cube aggregation methods such as + :meth:`~iris.cube.Cube.collapsed` and + :meth:`~iris.cube.Cube.aggregated_by`. For example:: - * cell_method (string): - Cell method definition formatter. Used in the fashion - "cell_method.format(\**kwargs)", to produce a cell-method string - which can include keyword values. + result = cube.collapsed('longitude', iris.analysis.MEAN) - * call_func (callable): - | *Call signature*: (data, axis=None, \**kwargs) + A variety of ready-made aggregators are provided in this module, such + as :data:`~iris.analysis.MEAN` and :data:`~iris.analysis.MAX`. 
Custom + aggregators can also be created for special purposes, see + :ref:`sphx_glr_generated_gallery_general_plot_custom_aggregation.py` + for a worked example. + Parameters + ---------- + cell_method : str + Cell method definition formatter. Used in the fashion + ``cell_method.format(**kwargs)``, to produce a cell-method string + which can include keyword values. + call_func : callable + Call signature: ``(data, axis=None, **kwargs)``. Data aggregation function. Returns an aggregation result, collapsing the 'axis' dimension of the 'data' argument. - - Kwargs: - - * units_func (callable): - | *Call signature*: (units) - + units_func : callable, optional + Call signature: `(units, **kwargs)`. If provided, called to convert a cube's units. Returns an :class:`cf_units.Unit`, or a value that can be made into one. - - * lazy_func (callable or None): + To ensure backwards-compatibility, also accepts a callable with + call signature (units). + lazy_func : callable or None, optional An alternative to :data:`call_func` implementing a lazy aggregation. Note that, it need not support all features of the main operation, but should raise an error in unhandled cases. - - Additional kwargs:: - Passed through to :data:`call_func` and :data:`lazy_func`. - - Aggregators are used by cube aggregation methods such as - :meth:`~iris.cube.Cube.collapsed` and - :meth:`~iris.cube.Cube.aggregated_by`. For example:: - - result = cube.collapsed('longitude', iris.analysis.MEAN) - - A variety of ready-made aggregators are provided in this module, such - as :data:`~iris.analysis.MEAN` and :data:`~iris.analysis.MAX`. Custom - aggregators can also be created for special purposes, see - :ref:`General-custom_aggregation` for a worked example. - + **kwargs : dict, optional + Passed through to :data:`call_func`, :data:`lazy_func`, and + :data:`units_func`. """ #: Cube cell method string. self.cell_method = cell_method @@ -434,7 +499,8 @@ def __init__(self, cell_method, call_func, units_func=None, self._kwargs = kwargs def lazy_aggregate(self, data, axis, **kwargs): - """ + """Perform aggregation over the data with a lazy operation. + Perform aggregation over the data with a lazy operation, analogous to the 'aggregate' result. @@ -443,29 +509,26 @@ def lazy_aggregate(self, data, axis, **kwargs): This function is usually used in conjunction with update_metadata(), which should be passed the same keyword arguments. - Args: - - * data (array): - A lazy array (:class:`dask.array.Array`). - - * axis (int or list of int): + Parameters + ---------- + data : :class:`dask.array.Array` + A lazy array. + axis : int or list of int The dimensions to aggregate over -- note that this is defined differently to the 'aggregate' method 'axis' argument, which only accepts a single dimension index. - - Kwargs: - - * kwargs: + **kwargs : dict, optional All keyword arguments are passed through to the data aggregation function. - Returns: - A lazy array representing the aggregation operation - (:class:`dask.array.Array`). + Returns + ------- + :class:`dask.array.Array` + A lazy array representing the aggregation operation. """ if self.lazy_func is None: - msg = '{} aggregator does not support lazy operation.' + msg = "{} aggregator does not support lazy operation." 
raise LazyAggregatorError(msg.format(self.name())) # Combine keyword args with `kwargs` taking priority over those @@ -475,46 +538,47 @@ def lazy_aggregate(self, data, axis, **kwargs): return self.lazy_func(data, axis=axis, **kwargs) def aggregate(self, data, axis, **kwargs): - """ - Perform the aggregation function given the data. + """Perform the aggregation function given the data. Keyword arguments are passed through to the data aggregation function (for example, the "percent" keyword for a percentile aggregator). This function is usually used in conjunction with update_metadata(), which should be passed the same keyword arguments. - Args: - - * data (array): + Parameters + ---------- + data : array Data array. - - * axis (int): + axis : int Axis to aggregate over. - - Kwargs: - - * mdtol (float): + mdtol : float, optional Tolerance of missing data. The value returned will be masked if the fraction of data to missing data is less than or equal to mdtol. mdtol=0 means no missing data is tolerated while mdtol=1 will return the resulting value from the aggregation function. Defaults to 1. - - * kwargs: + **kwargs : dict, optional All keyword arguments apart from those specified above, are passed through to the data aggregation function. - Returns: - The aggregated data. + Returns + ------- + The aggregated data. """ kwargs = dict(list(self._kwargs.items()) + list(kwargs.items())) - mdtol = kwargs.pop('mdtol', None) + mdtol = kwargs.pop("mdtol", None) result = self.call_func(data, axis=axis, **kwargs) - if (mdtol is not None and ma.isMaskedArray(data)): + if mdtol is not None and ma.is_masked(data) and result is not ma.masked: fraction_not_missing = data.count(axis=axis) / data.shape[axis] - mask_update = 1 - mdtol > fraction_not_missing + mask_update = np.array(1 - mdtol > fraction_not_missing) + if np.array(result).ndim > mask_update.ndim: + # call_func created trailing dimension. + mask_update = np.broadcast_to( + mask_update.reshape(mask_update.shape + (1,)), + np.array(result).shape, + ) if ma.isMaskedArray(result): result.mask = result.mask | mask_update else: @@ -523,193 +587,227 @@ def aggregate(self, data, axis, **kwargs): return result def update_metadata(self, cube, coords, **kwargs): - """ - Update common cube metadata w.r.t the aggregation function. - - Args: + """Update common cube metadata w.r.t the aggregation function. - * cube (:class:`iris.cube.Cube`): + Parameters + ---------- + cube : :class:`iris.cube.Cube` Source cube that requires metadata update. - * coords (:class:`iris.coords.Coord`): + coords : :class:`iris.coords.Coord` The one or more coordinates that were aggregated. - - Kwargs: - - * This function is intended to be used in conjuction with aggregate() - and should be passed the same keywords (for example, the "ddof" - keyword for a standard deviation aggregator). + **kwargs : dict, optional + This function is intended to be used in conjunction with aggregate() + and should be passed the same keywords (for example, the "ddof" + keyword for a standard deviation aggregator). """ # Update the units if required. if self.units_func is not None: - cube.units = self.units_func(cube.units) - - def post_process(self, collapsed_cube, data_result, coords, **kwargs): - """ - Process the result from :func:`iris.analysis.Aggregator.aggregate`. - - Args: - - * collapsed_cube: - A :class:`iris.cube.Cube`. 
-        * data_result:
-            Result from :func:`iris.analysis.Aggregator.aggregate`
-        * coords:
+            argspec = getfullargspec(self.units_func)
+            if argspec.varkw is None:  # old style
+                cube.units = self.units_func(cube.units)
+            else:  # new style (preferred)
+                cube.units = self.units_func(cube.units, **kwargs)
+
+    def post_process(
+        self, collapsed_cube, data_result, coords, **kwargs
+    ):  # numpydoc ignore=SS05
+        """Process the result from :func:`iris.analysis.Aggregator.aggregate`.
+
+        Parameters
+        ----------
+        collapsed_cube : :class:`iris.cube.Cube`
+        data_result :
+            Result from :func:`iris.analysis.Aggregator.aggregate`.
+        coords :
             The one or more coordinates that were aggregated over.
+        **kwargs : dict, optional
+            This function is intended to be used in conjunction with aggregate()
+            and should be passed the same keywords (for example, the "ddof"
+            keyword from a standard deviation aggregator).
 
-        Kwargs:
-
-        * This function is intended to be used in conjunction with aggregate()
-          and should be passed the same keywords (for example, the "ddof"
-          keyword from a standard deviation aggregator).
-
-        Returns:
-            The collapsed cube with its aggregated data payload.
+        Returns
+        -------
+        The collapsed cube with its aggregated data payload.
 
         """
         collapsed_cube.data = data_result
         return collapsed_cube
 
     def aggregate_shape(self, **kwargs):
-        """
-        The shape of the new dimension/s created by the aggregator.
-
-        Kwargs:
+        """Shape of the new dimension/s created by the aggregator.
 
-        * This function is intended to be used in conjunction with aggregate()
-          and should be passed the same keywords.
+        Parameters
+        ----------
+        **kwargs : dict, optional
+            This function is intended to be used in conjunction with aggregate()
+            and should be passed the same keywords.
 
-        Returns:
-            A tuple of the aggregate shape.
+        Returns
+        -------
+        A tuple of the aggregate shape.
 
         """
         return ()
 
     def name(self):
-        """
-        Returns the name of the aggregator.
-
-        """
+        """Return the name of the aggregator."""
         try:
-            name = '_'.join(self.cell_method.split())
+            name = "_".join(self.cell_method.split())
         except AttributeError:
-            name = 'unknown'
+            name = "unknown"
         return name
 
 
 class PercentileAggregator(_Aggregator):
-    """
-    The :class:`PercentileAggregator` class provides percentile aggregation
-    functionality.
+    """Provide percentile aggregation functionality.
 
     This aggregator *may* introduce a new dimension to the data for the
     statistic being calculated, but only if more than one quantile is required.
     For example, calculating the 50th and 90th percentile will result in a new
     data dimension with an extent of 2, for each of the quantiles calculated.
 
+    This aggregator can be used by cube aggregation methods such as
+    :meth:`~iris.cube.Cube.collapsed` and
+    :meth:`~iris.cube.Cube.aggregated_by`. For example::
+
+        cube.collapsed('longitude', iris.analysis.PERCENTILE, percent=50)
+
     """
-    def __init__(self, units_func=None, lazy_func=None, **kwargs):
-        """
-        Create a percentile aggregator.
 
-        Kwargs:
+    def __init__(self, units_func=None, **kwargs):
+        r"""Create a percentile aggregator.
 
-        * units_func (callable):
-            | *Call signature*: (units)
+        Parameters
+        ----------
+        units_func : callable, optional
+            Call signature: ``(units, **kwargs)``.
 
            If provided, called to convert a cube's units.
            Returns an :class:`cf_units.Unit`, or a
            value that can be made into one.
+            To ensure backwards-compatibility, also accepts a callable with
+            call signature (units).
+        **kwargs : dict, optional
+            Passed through to :data:`call_func`, :data:`lazy_func`, and
+            :data:`units_func`.
+ + """ + self._name = "percentile" + self._args = ["percent"] + _Aggregator.__init__( + self, + None, + _percentile, + units_func=units_func, + lazy_func=_build_dask_mdtol_function(_percentile), + **kwargs, + ) + + def _base_aggregate(self, data, axis, lazy, **kwargs): + """Avoid duplication of checks in aggregate and lazy_aggregate.""" + msg = "{} aggregator requires the mandatory keyword argument {!r}." + for arg in self._args: + if arg not in kwargs: + raise ValueError(msg.format(self.name(), arg)) - * lazy_func (callable or None): - An alternative to :data:`call_func` implementing a lazy - aggregation. Note that, it need not support all features of the - main operation, but should raise an error in unhandled cases. - - Additional kwargs:: - Passed through to :data:`call_func` and :data:`lazy_func`. - - This aggregator can used by cube aggregation methods such as - :meth:`~iris.cube.Cube.collapsed` and - :meth:`~iris.cube.Cube.aggregated_by`. For example:: - - cube.collapsed('longitude', iris.analysis.PERCENTILE, percent=50) + if kwargs.get("fast_percentile_method", False) and ( + kwargs.get("mdtol", 1) != 0 + ): + kwargs["error_on_masked"] = True - """ - self._name = 'percentile' - self._args = ['percent'] - _Aggregator.__init__(self, None, _percentile, - units_func=units_func, lazy_func=lazy_func, - **kwargs) + if lazy: + return _Aggregator.lazy_aggregate(self, data, axis, **kwargs) + else: + return _Aggregator.aggregate(self, data, axis, **kwargs) def aggregate(self, data, axis, **kwargs): - """ - Perform the percentile aggregation over the given data. + """Perform the percentile aggregation over the given data. Keyword arguments are passed through to the data aggregation function (for example, the "percent" keyword for a percentile aggregator). This function is usually used in conjunction with update_metadata(), which should be passed the same keyword arguments. - Args: - - * data (array): + Parameters + ---------- + data : array Data array. - - * axis (int): + axis : int Axis to aggregate over. - - Kwargs: - - * mdtol (float): + mdtol : float, optional Tolerance of missing data. The value returned will be masked if the fraction of data to missing data is less than or equal to mdtol. mdtol=0 means no missing data is tolerated while mdtol=1 will return the resulting value from the aggregation function. Defaults to 1. - - * kwargs: + **kwargs : dict, optional All keyword arguments apart from those specified above, are passed through to the data aggregation function. - Returns: - The aggregated data. + Returns + ------- + The aggregated data. """ + return self._base_aggregate(data, axis, lazy=False, **kwargs) - msg = '{} aggregator requires the mandatory keyword argument {!r}.' - for arg in self._args: - if arg not in kwargs: - raise ValueError(msg.format(self.name(), arg)) + def lazy_aggregate(self, data, axis, **kwargs): + """Perform aggregation over the data with a lazy operation. - return _Aggregator.aggregate(self, data, axis, **kwargs) + Perform aggregation over the data with a lazy operation, analogous to + the 'aggregate' result. - def post_process(self, collapsed_cube, data_result, coords, **kwargs): - """ - Process the result from :func:`iris.analysis.Aggregator.aggregate`. + Keyword arguments are passed through to the data aggregation function + (for example, the "percent" keyword for a percentile aggregator). + This function is usually used in conjunction with update_metadata(), + which should be passed the same keyword arguments. 
-
+        Parameters
+        ----------
+        data : :class:`dask.array.Array`
+            A lazy array.
+        axis : int or list of int
+            The dimensions to aggregate over -- note that this is defined
+            differently to the 'aggregate' method 'axis' argument, which only
+            accepts a single dimension index.
+        **kwargs : dict, optional
+            All keyword arguments are passed through to the data aggregation
+            function.
 
-        * collapsed_cube:
-            A :class:`iris.cube.Cube`.
-        * data_result:
-            Result from :func:`iris.analysis.Aggregator.aggregate`
-        * coords:
-            The one or more coordinates that were aggregated over.
+        Returns
+        -------
+        :class:`dask.array.Array`
+            A lazy array representing the result of the aggregation operation.
 
-        Kwargs:
+        """
+        return self._base_aggregate(data, axis, lazy=True, **kwargs)
 
-        * This function is intended to be used in conjunction with aggregate()
-          and should be passed the same keywords (for example, the "percent"
-          keywords from a percentile aggregator).
+    def post_process(
+        self, collapsed_cube, data_result, coords, **kwargs
+    ):  # numpydoc ignore=SS05
+        """Process the result from :func:`iris.analysis.Aggregator.aggregate`.
 
-        Returns:
-            The collapsed cube with it's aggregated data payload.
+        Parameters
+        ----------
+        collapsed_cube : :class:`iris.cube.Cube`
+        data_result :
+            Result from :func:`iris.analysis.Aggregator.aggregate`.
+        coords :
+            The one or more coordinates that were aggregated over.
+        **kwargs : dict, optional
+            This function is intended to be used in conjunction with aggregate()
+            and should be passed the same keywords (for example, the "percent"
+            keywords from a percentile aggregator).
+
+        Returns
+        -------
+        The collapsed cube with its aggregated data payload.
 
         """
         cubes = iris.cube.CubeList()
         # The additive aggregator requires a mandatory keyword.
-        msg = '{} aggregator requires the mandatory keyword argument {!r}.'
+        msg = "{} aggregator requires the mandatory keyword argument {!r}."
         for arg in self._args:
             if arg not in kwargs:
                 raise ValueError(msg.format(self.name(), arg))
@@ -717,7 +815,7 @@
         points = kwargs[self._args[0]]
         # Derive the name of the additive coordinate.
         names = [coord.name() for coord in coords]
-        coord_name = '{}_over_{}'.format(self.name(), '_'.join(names))
+        coord_name = "{}_over_{}".format(self.name(), "_".join(names))
 
         if not isinstance(points, Iterable):
             points = [points]
@@ -727,7 +825,7 @@
         # order cube.
         for point in points:
             cube = collapsed_cube.copy()
-            coord = iris.coords.AuxCoord(point, long_name=coord_name)
+            coord = iris.coords.AuxCoord(point, long_name=coord_name, units="percent")
             cube.add_aux_coord(coord)
             cubes.append(cube)
 
@@ -742,25 +840,26 @@
         data_result = np.rollaxis(data_result, -1)
 
         # Marry the collapsed cube and the data payload together.
-        result = _Aggregator.post_process(self, collapsed_cube, data_result,
-                                          coords, **kwargs)
+        result = _Aggregator.post_process(
+            self, collapsed_cube, data_result, coords, **kwargs
+        )
 
         return result
 
     def aggregate_shape(self, **kwargs):
-        """
-        The shape of the additive dimension created by the aggregator.
+        """Shape of the additive dimension created by the aggregator.
 
-        Kwargs:
+        Parameters
+        ----------
+        **kwargs : dict, optional
+            This function is intended to be used in conjunction with aggregate()
+            and should be passed the same keywords.
-        * This function is intended to be used in conjunction with aggregate()
-          and should be passed the same keywords.
-
-        Returns:
-            A tuple of the additive dimension shape.
+        Returns
+        -------
+        A tuple of the additive dimension shape.
 
         """
-
-        msg = '{} aggregator requires the mandatory keyword argument {!r}.'
+        msg = "{} aggregator requires the mandatory keyword argument {!r}."
         for arg in self._args:
             if arg not in kwargs:
                 raise ValueError(msg.format(self.name(), arg))
@@ -779,17 +878,12 @@ def aggregate_shape(self, **kwargs):
         return shape
 
     def name(self):
-        """
-        Returns the name of the aggregator.
-
-        """
+        """Return the name of the aggregator."""
         return self._name
 
 
 class WeightedPercentileAggregator(PercentileAggregator):
-    """
-    The :class:`WeightedPercentileAggregator` class provides percentile
-    aggregation functionality.
+    """Provide percentile aggregation functionality.
 
     This aggregator *may* introduce a new dimension to the data for the
     statistic being calculated, but only if more than one quantile is required.
@@ -797,38 +891,59 @@ class WeightedPercentileAggregator(PercentileAggregator):
     data dimension with an extent of 2, for each of the quantiles calculated.
 
     """
-    def __init__(self, units_func=None, lazy_func=None, **kwargs):
-        """
-        Create a weighted percentile aggregator.
 
-        Kwargs:
+    def __init__(self, units_func=None, lazy_func=None, **kwargs):
+        r"""Create a weighted percentile aggregator.
 
-        * units_func (callable):
-            | *Call signature*: (units)
+        Parameters
+        ----------
+        units_func : callable or None
+            | *Call signature*: ``(units, **kwargs)``.
 
             If provided, called to convert a cube's units.
             Returns an :class:`cf_units.Unit`, or a
             value that can be made into one.
 
-        * lazy_func (callable or None):
+            To ensure backwards-compatibility, also accepts a callable with
+            call signature (units).
+
+            If the aggregator is used by a cube aggregation method (e.g.,
+            :meth:`~iris.cube.Cube.collapsed`,
+            :meth:`~iris.cube.Cube.aggregated_by`,
+            :meth:`~iris.cube.Cube.rolling_window`), a keyword argument
+            `_weights_units` is provided to this function to allow updating
+            units based on the weights. `_weights_units` is determined from the
+            `weights` given to the aggregator (``None`` if no weights are
+            given). See :ref:`user guide `
+            for an example of weighted aggregation that changes units.
+        lazy_func : callable or None
             An alternative to :data:`call_func` implementing a lazy
             aggregation. Note that it need not support all features of the
             main operation, but should raise an error in unhandled cases.
+        **kwargs : dict, optional
+            Passed through to :data:`call_func`, :data:`lazy_func`, and
+            :data:`units_func`.
 
-        Additional kwargs::
-            Passed through to :data:`call_func` and :data:`lazy_func`.
-
+        Notes
+        -----
        This aggregator can be used by cube aggregation methods such as
        :meth:`~iris.cube.Cube.collapsed` and
-        :meth:`~iris.cube.Cube.aggregated_by`. For example::
+        :meth:`~iris.cube.Cube.aggregated_by`.
+
+        For example::
 
            cube.collapsed('longitude', iris.analysis.WPERCENTILE, percent=50,
                           weights=iris.analysis.cartography.area_weights(cube))
 
        """
-        _Aggregator.__init__(self, None, _weighted_percentile,
-                             units_func=units_func, lazy_func=lazy_func,
-                             **kwargs)
+        _Aggregator.__init__(
+            self,
+            None,
+            _weighted_percentile,
+            units_func=units_func,
+            lazy_func=lazy_func,
+            **kwargs,
+        )
        self._name = "weighted_percentile"
        self._args = ["percent", "weights"]
@@ -836,69 +951,65 @@ def __init__(self, units_func=None, lazy_func=None, **kwargs):
        #: A list of keywords associated with weighted behaviour.
self._weighting_keywords = ["returned", "weights"]
 
-    def post_process(self, collapsed_cube, data_result, coords, **kwargs):
-        """
-        Process the result from :func:`iris.analysis.Aggregator.aggregate`.
+    def post_process(
+        self, collapsed_cube, data_result, coords, **kwargs
+    ):  # numpydoc ignore=SS05
+        """Process the result from :func:`iris.analysis.Aggregator.aggregate`.
 
         Returns a tuple(cube, weights) if a tuple(data, weights) was returned
         from :func:`iris.analysis.Aggregator.aggregate`.
 
-        Args:
-
-        * collapsed_cube:
-            A :class:`iris.cube.Cube`.
-        * data_result:
-            Result from :func:`iris.analysis.Aggregator.aggregate`
-        * coords:
+        Parameters
+        ----------
+        collapsed_cube : :class:`iris.cube.Cube`
+        data_result :
+            Result from :func:`iris.analysis.Aggregator.aggregate`.
+        coords :
             The one or more coordinates that were aggregated over.
-
-        Kwargs:
-
-        * This function is intended to be used in conjunction with aggregate()
-          and should be passed the same keywords (for example, the "weights"
-          keyword).
-
-        Returns:
+        **kwargs : dict, optional
+            This function is intended to be used in conjunction with aggregate()
+            and should be passed the same keywords (for example, the "weights"
+            keyword).
+
+        Returns
+        -------
+        collapsed cube
            The collapsed cube with its aggregated data payload. Or a tuple
            pair of (cube, weights) if the keyword "returned" is specified
            and True.
 
         """
-        if kwargs.get('returned', False):
+        if kwargs.get("returned", False):
             # Package the data into the cube and return a tuple
             collapsed_cube = PercentileAggregator.post_process(
-                self, collapsed_cube, data_result[0], coords, **kwargs)
+                self, collapsed_cube, data_result[0], coords, **kwargs
+            )
             result = (collapsed_cube, data_result[1])
         else:
-            result = PercentileAggregator.post_process(self, collapsed_cube,
-                                                       data_result, coords,
-                                                       **kwargs)
+            result = PercentileAggregator.post_process(
+                self, collapsed_cube, data_result, coords, **kwargs
+            )
 
         return result
 
 
 class Aggregator(_Aggregator):
-    """
-    The :class:`Aggregator` class provides common aggregation functionality.
+    """The :class:`Aggregator` class provides common aggregation functionality."""
 
-    """
     def update_metadata(self, cube, coords, **kwargs):
-        """
-        Update cube cell method metadata w.r.t the aggregation function.
+        """Update cube cell method metadata w.r.t the aggregation function.
 
-        Args:
-
-        * cube (:class:`iris.cube.Cube`):
+        Parameters
+        ----------
+        cube : :class:`iris.cube.Cube`
            Source cube that requires metadata update.
-        * coords (:class:`iris.coords.Coord`):
+        coords : :class:`iris.coords.Coord`
            The one or more coordinates that were aggregated.
-
-        Kwargs:
-
-        * This function is intended to be used in conjuction with aggregate()
-          and should be passed the same keywords (for example, the "ddof"
-          keyword for a standard deviation aggregator).
+        **kwargs : dict, optional
+            This function is intended to be used in conjunction with aggregate()
+            and should be passed the same keywords (for example, the "ddof"
+            keyword for a standard deviation aggregator).
 
         """
         _Aggregator.update_metadata(self, cube, coords, **kwargs)
@@ -911,66 +1022,84 @@
         coord_names = []
         for coord in coords:
             if not isinstance(coord, iris.coords.Coord):
-                raise TypeError('Coordinate instance expected to the '
-                                'Aggregator object.')
+                raise TypeError(
+                    "Coordinate instance expected to the Aggregator object."
+                )
            coord_names.append(coord.name())
 
         # Add a cell method.
- method_name = self.cell_method.format(**kwargs) - cell_method = iris.coords.CellMethod(method_name, coord_names) - cube.add_cell_method(cell_method) + if self.cell_method is not None: + method_name = self.cell_method.format(**kwargs) + cell_method = iris.coords.CellMethod(method_name, coord_names) + cube.add_cell_method(cell_method) class WeightedAggregator(Aggregator): - """ - Convenience class that supports common weighted aggregation functionality. + """Convenience class that supports common weighted aggregation functionality.""" - """ - def __init__(self, cell_method, call_func, units_func=None, - lazy_func=None, **kwargs): - """ - Create a weighted aggregator for the given :data:`call_func`. - - Args: + def __init__( + self, cell_method, call_func, units_func=None, lazy_func=None, **kwargs + ): + r"""Create a weighted aggregator for the given :data:`call_func`. - * cell_method (string): + Parameters + ---------- + cell_method : str Cell method string that supports string format substitution. + call_func : callable + Data aggregation function. Call signature `(data, axis, + \**kwargs)`. + units_func : callable, optional + | *Call signature*: (units, \**kwargs) - * call_func (callable): - Data aggregation function. Call signature `(data, axis, **kwargs)`. - - Kwargs: - - * units_func (callable): - Units conversion function. - - * lazy_func (callable or None): + If provided, called to convert a cube's units. + Returns an :class:`cf_units.Unit`, or a + value that can be made into one. + To ensure backwards-compatibility, also accepts a callable with + call signature (units). + + If the aggregator is used by a cube aggregation method (e.g., + :meth:`~iris.cube.Cube.collapsed`, + :meth:`~iris.cube.Cube.aggregated_by`, + :meth:`~iris.cube.Cube.rolling_window`), a keyword argument + `_weights_units` is provided to this function to allow updating + units based on the weights. `_weights_units` is determined from the + `weights` given to the aggregator (``None`` if no weights are + given). See :ref:`user guide ` + for an example of weighted aggregation that changes units. + + lazy_func : callable, optional An alternative to :data:`call_func` implementing a lazy aggregation. Note that, it need not support all features of the main operation, but should raise an error in unhandled cases. - - Additional kwargs: - Passed through to :data:`call_func` and :data:`lazy_func`. + **kwargs : dict, optional + Passed through to :data:`call_func`, :data:`lazy_func`, and + :data:`units_func`. """ - Aggregator.__init__(self, cell_method, call_func, - units_func=units_func, lazy_func=lazy_func, - **kwargs) + Aggregator.__init__( + self, + cell_method, + call_func, + units_func=units_func, + lazy_func=lazy_func, + **kwargs, + ) #: A list of keywords that trigger weighted behaviour. self._weighting_keywords = ["returned", "weights"] def uses_weighting(self, **kwargs): - """ - Determine whether this aggregator uses weighting. + """Determine whether this aggregator uses weighting. - Kwargs: - - * kwargs: + Parameters + ---------- + **kwargs : dict, optional Arguments to filter of weighted keywords. - Returns: - Boolean. + Returns + ------- + bool """ result = False @@ -980,59 +1109,165 @@ def uses_weighting(self, **kwargs): break return result - def post_process(self, collapsed_cube, data_result, coords, **kwargs): - """ - Process the result from :func:`iris.analysis.Aggregator.aggregate`. 
+    def post_process(
+        self, collapsed_cube, data_result, coords, **kwargs
+    ):  # numpydoc ignore=SS05
+        """Process the result from :func:`iris.analysis.Aggregator.aggregate`.
 
         Returns a tuple(cube, weights) if a tuple(data, weights) was returned
         from :func:`iris.analysis.Aggregator.aggregate`.
 
-        Args:
-
-        * collapsed_cube:
-            A :class:`iris.cube.Cube`.
-        * data_result:
-            Result from :func:`iris.analysis.Aggregator.aggregate`
-        * coords:
+        Parameters
+        ----------
+        collapsed_cube : :class:`iris.cube.Cube`
+        data_result :
+            Result from :func:`iris.analysis.Aggregator.aggregate`.
+        coords :
             The one or more coordinates that were aggregated over.
-
-        Kwargs:
-
-        * This function is intended to be used in conjunction with aggregate()
-          and should be passed the same keywords (for example, the "weights"
-          keywords from a mean aggregator).
-
-        Returns:
-            The collapsed cube with it's aggregated data payload. Or a tuple
+        **kwargs : dict, optional
+            This function is intended to be used in conjunction with aggregate()
+            and should be passed the same keywords (for example, the "weights"
+            keywords from a mean aggregator).
+
+        Returns
+        -------
+        The collapsed cube
+            The collapsed cube with its aggregated data payload. Or a tuple
             pair of (cube, weights) if the keyword "returned" is specified
             and True.
 
         """
-        if kwargs.get('returned', False):
+        if kwargs.get("returned", False):
             # Package the data into the cube and return a tuple
             collapsed_cube.data, collapsed_weights = data_result
             result = (collapsed_cube, collapsed_weights)
         else:
-            result = Aggregator.post_process(self, collapsed_cube,
-                                             data_result, coords, **kwargs)
+            result = Aggregator.post_process(
+                self, collapsed_cube, data_result, coords, **kwargs
+            )
 
         return result
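For orientation, a sketch of the weighted path through `collapsed` (the file name is a placeholder; `area_weights` needs bounds on both horizontal coordinates, hence the `guess_bounds` calls). With `returned=True`, the `post_process` above hands back a (cube, weights) tuple:

    import iris
    import iris.analysis
    from iris.analysis.cartography import area_weights

    cube = iris.load_cube("air_temperature.nc")  # placeholder file name
    cube.coord("latitude").guess_bounds()
    cube.coord("longitude").guess_bounds()

    # With returned=True, post_process gives back (cube, collapsed_weights).
    mean_cube, weight_sums = cube.collapsed(
        ["latitude", "longitude"],
        iris.analysis.MEAN,
        weights=area_weights(cube),
        returned=True,
    )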
 
 
-def _build_dask_mdtol_function(dask_stats_function):
+class _Weights:
+    """Class for handling weights for weighted aggregation.
+
+    Provides the following two attributes:
+
+    * ``array``: Lazy or non-lazy array of weights.
+    * ``units``: Units associated with the weights.
+
     """
-    Make a wrapped dask statistic function that supports the 'mdtol' keyword.
+
+    def __init__(self, weights, cube):
+        """Initialize class instance.
+
+        Parameters
+        ----------
+        weights : cube, str, _DimensionalMetadata, array-like
+            If given as a :class:`iris.cube.Cube`, use its data and units. If
+            given as a :obj:`str` or :class:`iris.coords._DimensionalMetadata`,
+            assume this is (the name of) a
+            :class:`iris.coords._DimensionalMetadata` object of the cube (i.e.,
+            one of :meth:`iris.cube.Cube.coords`,
+            :meth:`iris.cube.Cube.cell_measures`, or
+            :meth:`iris.cube.Cube.ancillary_variables`). If given as an
+            array-like object, use this directly and assume units of `1`. Note:
+            this does **not** create a copy of the input array.
+        cube : cube
+            Input cube for aggregation. If weights is given as :obj:`str` or
+            :class:`iris.coords._DimensionalMetadata`, try to extract the
+            :class:`iris.coords._DimensionalMetadata` object and corresponding
+            dimensional mappings from this cube. Otherwise, this argument is
+            ignored.
+
+        """
+        # `weights` is a cube
+        # Note: to avoid circular imports of Cube we use duck typing using the
+        # "hasattr" syntax here
+        # --> Extract data and units from cube
+        if hasattr(weights, "add_aux_coord"):
+            derived_array = weights.core_data()
+            derived_units = weights.units
+
+        # `weights` is a string or _DimensionalMetadata object
+        # --> Extract _DimensionalMetadata object from cube, broadcast it to
+        # correct shape using the corresponding dimensional mapping, and use
+        # its data and units
+        elif isinstance(weights, (str, _DimensionalMetadata)):
+            dim_metadata = cube._dimensional_metadata(weights)
+            derived_array = dim_metadata._core_values()
+            if dim_metadata.shape != cube.shape:
+                derived_array = iris.util.broadcast_to_shape(
+                    derived_array,
+                    cube.shape,
+                    dim_metadata.cube_dims(cube),
+                )
+            derived_units = dim_metadata.units
+
+        # Remaining types (e.g., np.ndarray, dask.array.core.Array, etc.)
+        # --> Use array directly and assign units of "1"
+        else:
+            derived_array = weights
+            derived_units = Unit("1")
+
+        self.array = derived_array
+        self.units = derived_units
+
+
+def create_weighted_aggregator_fn(aggregator_fn, axis, **kwargs):
+    """Return an aggregator function that can explicitly handle weights.
+
+    Parameters
+    ----------
+    aggregator_fn : callable
+        An aggregator function, i.e., a callable that takes arguments ``data``,
+        ``axis`` and ``**kwargs`` and returns an array. Examples:
+        :meth:`Aggregator.aggregate`, :meth:`Aggregator.lazy_aggregate`.
+        This function should accept the keyword argument ``weights``.
+    axis : int
+        Axis to aggregate over. This argument is directly passed to
+        ``aggregator_fn``.
+    **kwargs : dict, optional
+        Arbitrary keyword arguments passed to ``aggregator_fn``. Should not
+        include ``weights`` (this will be removed if present).
+
+    Returns
+    -------
+    function
+        A function that takes two arguments ``data_arr`` and ``weights`` (both
+        should be an array of the same shape) and returns an array.
+
+    """
+    kwargs_copy = dict(kwargs)
+    kwargs_copy.pop("weights", None)
+    aggregator_fn = functools.partial(aggregator_fn, axis=axis, **kwargs_copy)
+
+    def new_aggregator_fn(data_arr, weights):
+        """Weighted aggregation."""
+        if weights is None:
+            return aggregator_fn(data_arr)
+        return aggregator_fn(data_arr, weights=weights)
+
+    return new_aggregator_fn
+
+
+def _build_dask_mdtol_function(dask_stats_function):
+    """Make a wrapped dask statistic function that supports the 'mdtol' keyword.
 
     'dask_function' must be a dask statistical function, compatible with the
     call signature : "dask_stats_function(data, axis=axis, **kwargs)".
     It must be masked-data tolerant, i.e. it ignores masked input points and
     performs a calculation on only the unmasked points.
-    For example, mean([1, --, 2]) = (1 + 2) / 2 = 1.5.
+    For example, mean([1, --, 2]) = (1 + 2) / 2 = 1.5.  If an additional
+    dimension is created by 'dask_function', it is assumed to be the trailing
+    one (as for '_percentile').
 
     The returned value is a new function operating on dask arrays.
     It has the call signature `stat(data, axis=-1, mdtol=None, **kwargs)`.
 
     """
+
     @wraps(dask_stats_function)
     def inner_stat(array, axis=-1, mdtol=None, **kwargs):
         # Call the statistic to get the basic result (missing-data tolerant).
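The mdtol rule being wrapped in here can be checked end-to-end with a tiny cube (a sketch with arbitrary values, not a test from the suite):

    import numpy as np
    import numpy.ma as ma

    import iris.analysis
    from iris.coords import DimCoord
    from iris.cube import Cube

    data = ma.masked_invalid([[1.0, np.nan, np.nan], [4.0, 5.0, np.nan]])
    cube = Cube(data, long_name="example")
    cube.add_dim_coord(DimCoord([0, 1], long_name="y"), 0)
    cube.add_dim_coord(DimCoord([0, 1, 2], long_name="x"), 1)

    # Row 0 is 2/3 masked, row 1 is 1/3 masked; with mdtol=0.5 only row 0
    # exceeds the tolerated masked fraction, so only row 0 comes back masked.
    result = cube.collapsed("x", iris.analysis.MEAN, mdtol=0.5)
    print(result.data)  # [-- 4.5]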
@@ -1046,93 +1281,156 @@ def inner_stat(array, axis=-1, mdtol=None, **kwargs):
             points_per_calc = array.size / dask_result.size
             masked_point_fractions = point_mask_counts / points_per_calc
             boolean_mask = masked_point_fractions > mdtol
+            if dask_result.ndim > boolean_mask.ndim:
+                # dask_stats_function created trailing dimension.
+                boolean_mask = da.broadcast_to(
+                    boolean_mask.reshape(boolean_mask.shape + (1,)),
+                    dask_result.shape,
+                )
             # Return an mdtol-masked version of the basic result.
-            result = da.ma.masked_array(da.ma.getdata(dask_result),
-                                        boolean_mask)
+            result = da.ma.masked_array(da.ma.getdata(dask_result), boolean_mask)
         return result
+
     return inner_stat
 
 
-def _percentile(data, axis, percent, fast_percentile_method=False,
-                **kwargs):
+def _axis_to_single_trailing(stats_function):
+    """Wrap a statistical function so it can be applied over any axis or axes.
+
+    Given a statistical function that acts on the trailing axis of a 1D or 2D
+    array, wrap it so that higher dimension arrays can be passed, as well as any
+    axis as int or tuple.
+
     """
-    The percentile aggregator is an additive operation. This means that
-    it *may* introduce a new dimension to the data for the statistic being
-    calculated, but only if more than one percentile point is requested.
+
+    @wraps(stats_function)
+    def inner_stat(data, axis, *args, **kwargs):
+        # Get data as a 1D or 2D view with the target axis as the trailing one.
+        if not isinstance(axis, Iterable):
+            axis = (axis,)
+        end = range(-len(axis), 0)
+
+        data = np.moveaxis(data, axis, end)
+        shape = data.shape[: -len(axis)]  # Shape of dims we won't collapse.
+        if shape:
+            data = data.reshape(np.prod(shape), -1)
+        else:
+            data = data.flatten()
+
+        result = stats_function(data, *args, **kwargs)
+
+        # Ensure to unflatten any leading dimensions.
+        if shape:
+            # Account for the additive dimension if necessary.
+            if result.size > np.prod(shape):
+                shape += (-1,)
+            result = result.reshape(shape)
+
+        return result
+
+    return inner_stat
+
+
+def _calc_percentile(data, percent, fast_percentile_method=False, **kwargs):
+    """Calculate percentiles along the trailing axis of a 1D or 2D array."""
+    if fast_percentile_method:
+        if kwargs.pop("error_on_masked", False):
+            msg = (
+                "Cannot use fast np.percentile method with masked array unless"
+                " mdtol is 0."
+            )
+            if ma.is_masked(data):
+                raise TypeError(msg)
+        with warnings.catch_warnings():
+            warnings.filterwarnings(
+                "ignore",
+                "Warning: 'partition' will ignore the 'mask' of the MaskedArray.",
+            )
+            result = np.percentile(data, percent, axis=-1, **kwargs)
+
+        result = result.T
+    else:
+        quantiles = percent / 100.0
+        for key in ["alphap", "betap"]:
+            kwargs.setdefault(key, 1)
+        result = scipy.stats.mstats.mquantiles(data, quantiles, axis=-1, **kwargs)
+    if not ma.isMaskedArray(data) and not ma.is_masked(result):
+        return np.asarray(result)
+    else:
+        return ma.MaskedArray(result)
+
+
+@_axis_to_single_trailing
+def _percentile(data, percent, fast_percentile_method=False, **kwargs):
+    """The percentile aggregator is an additive operation.
+
+    This means that it *may* introduce a new dimension to the data for the
+    statistic being calculated, but only if more than one percentile point is
+    requested.
 
     If a new additive dimension is formed, then it will always be the last
     dimension of the resulting percentile data payload.
 
-    Kwargs:
-
-    * fast_percentile_method (boolean) :
+    Parameters
+    ----------
+    data : array-like
+        Array from which percentiles are to be calculated.
+ fast_percentile_method : bool, optional When set to True, uses the numpy.percentiles method as a faster alternative to the scipy.mstats.mquantiles method. Does not handle masked arrays. + **kwargs : dict, optional + Passed to scipy.stats.mstats.mquantiles if fast_percentile_method is + False. Otherwise passed to numpy.percentile. """ - # Ensure that the target axis is the last dimension. - data = np.rollaxis(data, axis, start=data.ndim) - shape = data.shape[:-1] - # Flatten any leading dimensions. - if shape: - data = data.reshape([np.prod(shape), data.shape[-1]]) - # Perform the percentile calculation. - if fast_percentile_method: - msg = 'Cannot use fast np.percentile method with masked array.' - if ma.isMaskedArray(data): - raise TypeError(msg) - result = np.percentile(data, percent, axis=-1) - result = result.T - else: - quantiles = np.array(percent) / 100. - result = scipy.stats.mstats.mquantiles(data, quantiles, axis=-1, - **kwargs) - if not ma.isMaskedArray(data) and not ma.is_masked(result): - result = np.asarray(result) + if not isinstance(percent, Iterable): + percent = [percent] + percent = np.array(percent) + + result = iris._lazy_data.map_complete_blocks( + data, + _calc_percentile, + (-1,), + percent.shape, + percent=percent, + fast_percentile_method=fast_percentile_method, + **kwargs, + ) - # Ensure to unflatten any leading dimensions. - if shape: - if not isinstance(percent, Iterable): - percent = [percent] - percent = np.array(percent) - # Account for the additive dimension. - if percent.shape > (1,): - shape += percent.shape - result = result.reshape(shape) # Check whether to reduce to a scalar result, as per the behaviour # of other aggregators. - if result.shape == (1,) and quantiles.ndim == 0: - result = result[0] + if result.shape == (1,): + result = np.squeeze(result) return result def _weighted_quantile_1D(data, weights, quantiles, **kwargs): - """ - Compute the weighted quantile of a 1D numpy array. + """Compute the weighted quantile of a 1D numpy array. Adapted from `wquantiles `_ - Args: - - * data (array) - One dimensional data array - * weights (array) + Parameters + ---------- + data : array + One dimensional data array. + weights : array Array of the same size of `data`. If data is masked, weights must have matching mask. - * quantiles : (float or sequence of floats) + quantiles : float or sequence of floats Quantile(s) to compute. Must have a value between 0 and 1. - - **kwargs - passed to `scipy.interpolate.interp1d` - - Returns: - array or float. Calculated quantile values (set to np.nan wherever sum - of weights is zero or masked) + **kwargs : dict, optional + Passed to `scipy.interpolate.interp1d`. + + Returns + ------- + array or float. + Calculated quantile values (set to np.nan wherever sum + of weights is zero or masked). """ - # Return np.nan if no useable points found - if np.isclose(weights.sum(), 0.) 
or ma.is_masked(weights.sum()): + # Return np.nan if no usable points found + if np.isclose(weights.sum(), 0.0) or ma.is_masked(weights.sum()): return np.resize(np.array(np.nan), len(quantiles)) # Sort the data ind_sorted = ma.argsort(data) @@ -1140,10 +1438,11 @@ def _weighted_quantile_1D(data, weights, quantiles, **kwargs): sorted_weights = weights[ind_sorted] # Compute the auxiliary arrays Sn = np.cumsum(sorted_weights) - Pn = (Sn-0.5*sorted_weights)/np.sum(sorted_weights) + Pn = (Sn - 0.5 * sorted_weights) / np.sum(sorted_weights) # Get the value of the weighted quantiles - interpolator = scipy.interpolate.interp1d(Pn, sorted_data, - bounds_error=False, **kwargs) + interpolator = scipy.interpolate.interp1d( + Pn, sorted_data, bounds_error=False, **kwargs + ) result = interpolator(quantiles) # Set cases where quantile falls outside data range to min or max np.place(result, Pn.min() > quantiles, sorted_data.min()) @@ -1152,41 +1451,44 @@ def _weighted_quantile_1D(data, weights, quantiles, **kwargs): return result -def _weighted_percentile(data, axis, weights, percent, returned=False, - **kwargs): - """ - The weighted_percentile aggregator is an additive operation. This means - that it *may* introduce a new dimension to the data for the statistic being - calculated, but only if more than one percentile point is requested. +def _weighted_percentile(data, axis, weights, percent, returned=False, **kwargs): + """Weighted_percentile aggregator is an additive operation. + + This means that it *may* introduce a new dimension to the data for the + statistic being calculated, but only if more than one percentile point is + requested. If a new additive dimension is formed, then it will always be the last dimension of the resulting percentile data payload. - Args: - - * data: ndarray or masked array - - * axis: int - axis to calculate percentiles over - - * weights: ndarray - array with the weights. Must have same shape as data - - * percent: float or sequence of floats - Percentile rank/s at which to extract value/s. - - * returned: bool, optional - Default False. If True, returns a tuple with the percentiles as the - first element and the sum of the weights as the second element. + Parameters + ---------- + data : ndarray or masked array + axis : int + Axis to calculate percentiles over. + weights : ndarray + Array with the weights. Must have same shape as data or the shape of + data along axis. + percent : float or sequence of floats + Percentile rank/s at which to extract value/s. + returned : bool, default=False + Default False. If True, returns a tuple with the percentiles as the + first element and the sum of the weights as the second element. """ - # Ensure that data and weights arrays are same shape. - if data.shape != weights.shape: - raise ValueError('_weighted_percentile: weights wrong shape.') + # Ensure that weights array is the same shape as data, or the shape of data along + # axis. + if data.shape != weights.shape and data.shape[axis : axis + 1] != weights.shape: + raise ValueError( + f"For data array of shape {data.shape}, weights should be {data.shape} or {data.shape[axis : axis + 1]}" + ) # Ensure that the target axis is the last dimension. data = np.rollaxis(data, axis, start=data.ndim) - weights = np.rollaxis(weights, axis, start=data.ndim) - quantiles = np.array(percent) / 100. 
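A minimal sketch (illustrative, not part of the patch) of the new weights handling that follows: 1-D weights sized to the collapse axis are broadcast up to the full data shape once that axis is trailing::

    import numpy as np

    data = np.arange(12.0).reshape(3, 4)  # collapse over axis 0 (length 3)
    weights = np.array([1.0, 2.0, 3.0])   # matches data.shape[0] only

    # Move the collapse axis to the end, as _weighted_percentile does ...
    rolled = np.rollaxis(data, 0, start=data.ndim)         # shape (4, 3)
    # ... then the 1-D weights align with the trailing axis and broadcast.
    full_weights = np.broadcast_to(weights, rolled.shape)  # shape (4, 3)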
+ if weights.ndim > 1: + weights = np.rollaxis(weights, axis, start=data.ndim) + elif data.ndim > 1: + weights = np.broadcast_to(weights, data.shape) + quantiles = np.array(percent) / 100.0 # Add data mask to weights if necessary. if ma.isMaskedArray(data): weights = ma.array(weights, mask=data.mask) @@ -1229,18 +1531,21 @@ def _weighted_percentile(data, axis, weights, percent, returned=False, return result -@_build_dask_mdtol_function -def _lazy_count(array, **kwargs): - array = iris._lazy_data.as_lazy_data(array) - func = kwargs.pop('function', None) +def _count(array, **kwargs): + """Count number of points along the axis that satisfy the condition. + + Condition specified by ``function``. Uses Dask's support for NEP13/18 to + work as either a lazy or a real function. + + """ + func = kwargs.pop("function", None) if not callable(func): - emsg = 'function must be a callable. Got {}.' + emsg = "function must be a callable. Got {}." raise TypeError(emsg.format(type(func))) - return da.sum(func(array), **kwargs) + return np.sum(func(array), **kwargs) def _proportion(array, function, axis, **kwargs): - count = iris._lazy_data.non_lazy(_lazy_count) # if the incoming array is masked use that to count the total number of # values if ma.isMaskedArray(array): @@ -1251,8 +1556,9 @@ def _proportion(array, function, axis, **kwargs): # case pass the array shape instead of the mask: total_non_masked = array.shape[axis] else: - total_non_masked = count( - array.mask, axis=axis, function=np.logical_not, **kwargs) + total_non_masked = _count( + array.mask, axis=axis, function=np.logical_not, **kwargs + ) total_non_masked = ma.masked_equal(total_non_masked, 0) else: total_non_masked = array.shape[axis] @@ -1262,59 +1568,112 @@ def _proportion(array, function, axis, **kwargs): # Otherwise, it is possible for numpy to return a masked array that has # a dtype for its data that is different to the dtype of the fill-value, # which can cause issues outside this function. - # Reference - tests/unit/analyis/test_PROPORTION.py Test_masked.test_ma - numerator = count(array, axis=axis, function=function, **kwargs) + # Reference - tests/unit/analysis/test_PROPORTION.py Test_masked.test_ma + numerator = _count(array, axis=axis, function=function, **kwargs) result = ma.asarray(numerator / total_non_masked) return result +def _lazy_max_run(array, axis=-1, **kwargs): + """Lazily perform the calculation of maximum run lengths along the given axis.""" + array = iris._lazy_data.as_lazy_data(array) + func = kwargs.pop("function", None) + if not callable(func): + emsg = "function must be a callable. Got {}." + raise TypeError(emsg.format(type(func))) + bool_array = da.ma.getdata(func(array)) + bool_array = da.logical_and(bool_array, da.logical_not(da.ma.getmaskarray(array))) + padding = [(0, 0)] * array.ndim + padding[axis] = (0, 1) + ones_zeros = da.pad(bool_array, padding).astype(int) + cum_sum = da.cumsum(ones_zeros, axis=axis) + run_totals = da.where(ones_zeros == 0, cum_sum, 0) + stepped_run_lengths = da.reductions.cumreduction( + np.maximum.accumulate, + np.maximum, + -np.inf, + run_totals, + axis=axis, + dtype=cum_sum.dtype, + out=None, + method="sequential", + preop=None, + ) + run_lengths = da.diff(stepped_run_lengths, axis=axis) + result = da.max(run_lengths, axis=axis) + + # Check whether to reduce to a scalar result, as per the behaviour + # of other aggregators. 
+ if result.shape == (1,): + result = da.squeeze(result) + + return result + + def _rms(array, axis, **kwargs): - # XXX due to the current limitations in `da.average` (see below), maintain - # an explicit non-lazy aggregation function for now. - # Note: retaining this function also means that if weights are passed to - # the lazy aggregator, the aggregation will fall back to using this - # non-lazy aggregator. - rval = np.sqrt(ma.average(np.square(array), axis=axis, **kwargs)) - if not ma.isMaskedArray(array): - rval = np.asarray(rval) + rval = np.sqrt(ma.average(array**2, axis=axis, **kwargs)) + return rval -@_build_dask_mdtol_function def _lazy_rms(array, axis, **kwargs): - # XXX This should use `da.average` and not `da.mean`, as does the above. - # However `da.average` current doesn't handle masked weights correctly - # (see https://github.com/dask/dask/issues/3846). - # To work around this we use da.mean, which doesn't support weights at - # all. Thus trying to use this aggregator with weights will currently - # raise an error in dask due to the unexpected keyword `weights`, - # rather than silently returning the wrong answer. - return da.sqrt(da.mean(array ** 2, axis=axis, **kwargs)) - - -@_build_dask_mdtol_function -def _lazy_sum(array, **kwargs): - array = iris._lazy_data.as_lazy_data(array) - # weighted or scaled sum - axis_in = kwargs.get('axis', None) - weights_in = kwargs.pop('weights', None) - returned_in = kwargs.pop('returned', False) + # Note that, since we specifically need the ma version of average to handle + # weights correctly with masked data, we cannot rely on NEP13/18 and need + # to implement a separate lazy RMS function. + + rval = da.sqrt(da.ma.average(array**2, axis=axis, **kwargs)) + + return rval + + +def _sum(array, **kwargs): + """Weighted or scaled sum. + + Uses Dask's support for NEP13/18 to work as either a lazy or a real + function. + + """ + axis_in = kwargs.get("axis", None) + weights_in = kwargs.pop("weights", None) + returned_in = kwargs.pop("returned", False) if weights_in is not None: - wsum = da.sum(weights_in * array, **kwargs) + wsum = np.sum(weights_in * array, **kwargs) else: - wsum = da.sum(array, **kwargs) + wsum = np.sum(array, **kwargs) if returned_in: + al = da if iris._lazy_data.is_lazy_data(array) else np if weights_in is None: - weights = iris._lazy_data.as_lazy_data(np.ones_like(array)) + weights = al.ones_like(array) + if al is da: + # Dask version of ones_like does not preserve masks. See dask#9301. 
+                weights = da.ma.masked_array(weights, da.ma.getmaskarray(array))
         else:
-            weights = weights_in
-        rvalue = (wsum, da.sum(weights, axis=axis_in))
+            weights = al.ma.masked_array(weights_in, mask=al.ma.getmaskarray(array))
+        rvalue = (wsum, np.sum(weights, axis=axis_in))
     else:
         rvalue = wsum
     return rvalue


+def _sum_units_func(units, **kwargs):
+    """Multiply original units with weight units if possible."""
+    weights = kwargs.get("weights")
+    weights_units = kwargs.get("_weights_units")
+    multiply_by_weights_units = all(
+        [
+            weights is not None,
+            weights_units is not None,
+            weights_units != "1",
+        ]
+    )
+    if multiply_by_weights_units:
+        result = units * weights_units
+    else:
+        result = units
+    return result
+
+
 def _peak(array, **kwargs):
     def column_segments(column):
         nan_indices = np.where(np.isnan(column))[0]
@@ -1328,10 +1687,9 @@ def column_segments(column):
                 if index != nan_index:
                     columns.append(column[:nan_index])
                 elif nan_indices[index - 1] != (nan_index - 1):
-                    columns.append(column[nan_indices[index - 1] + 1:
-                                          nan_index])
+                    columns.append(column[nan_indices[index - 1] + 1 : nan_index])
         if nan_indices[-1] != len(column) - 1:
-            columns.append(column[nan_indices[-1] + 1:])
+            columns.append(column[nan_indices[-1] + 1 :])
         return columns

     def interp_order(length):
@@ -1349,11 +1707,11 @@ def interp_order(length):
     slices[-1] = endslice
     slices = tuple(slices)  # Numpy>=1.16 : index with tuple, *not* list.

-    if isinstance(array.dtype, np.float):
+    if array.dtype == np.float64:
         data = array[slices]
     else:
         # Cast non-float data type.
-        data = array.astype('float32')[slices]
+        data = array.astype("float32")[slices]

     # Generate nd-index iterator over array.
     shape = list(array.shape)
@@ -1367,20 +1725,24 @@ def interp_order(length):

         # Check if the column slice contains a single value, nans only,
         # masked values only or if the values are all equal.
-        equal_slice = np.ones(column_slice.size,
-                              dtype=column_slice.dtype) * column_slice[0]
-        if column_slice.size == 1 or \
-                all(np.isnan(column_slice)) or \
-                ma.count(column_slice) == 0 or \
-                np.all(np.equal(equal_slice, column_slice)):
+        equal_slice = (
+            np.ones(column_slice.size, dtype=column_slice.dtype) * column_slice[0]
+        )
+        if (
+            column_slice.size == 1
+            or all(np.isnan(column_slice))
+            or ma.count(column_slice) == 0
+            or np.all(np.equal(equal_slice, column_slice))
+        ):
             continue

         # Check if the column slice is masked.
         if ma.isMaskedArray(column_slice):
             # Check if the column slice contains only nans, without inf
             # or -inf values, regardless of the mask.
-            if not np.any(np.isfinite(column_slice)) and \
-                    not np.any(np.isinf(column_slice)):
+            if not np.any(np.isfinite(column_slice)) and not np.any(
+                np.isinf(column_slice)
+            ):
                 data[ndindex[:-1]] = np.nan
                 continue

@@ -1421,60 +1783,99 @@ def interp_order(length):
 #
 # Common partial Aggregation class constructors.
 #
-COUNT = Aggregator('count', iris._lazy_data.non_lazy(_lazy_count),
-                   units_func=lambda units: 1,
-                   lazy_func=_lazy_count)
+COUNT = Aggregator(
+    "count",
+    _count,
+    units_func=lambda units, **kwargs: 1,
+    lazy_func=_build_dask_mdtol_function(_count),
+)
 """
 An :class:`~iris.analysis.Aggregator` instance that counts the number
 of :class:`~iris.cube.Cube` data occurrences that satisfy a particular
 criterion, as defined by a user supplied *function*.

-**Required** kwargs associated with the use of this aggregator:
-
-* function (callable):
+Parameters
+----------
+function : callable
     A function which converts an array of data values into a
     corresponding array of True/False values.
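A minimal sketch (illustrative, not from the patch) of why ``_count`` above works for both real and lazy data: NumPy's NEP 13/18 dispatch hands ``np.sum`` off to the input array's own implementation, so a dask input stays lazy::

    import dask.array as da
    import numpy as np

    real = np.arange(6).reshape(2, 3)
    lazy = da.from_array(real, chunks=(1, 3))

    # The same expression stays real for numpy input, lazy for dask input.
    assert isinstance(np.sum(real > 2, axis=0), np.ndarray)
    assert isinstance(np.sum(lazy > 2, axis=0), da.Array)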
-**For example**: - +Examples +-------- To compute the number of *ensemble members* with precipitation exceeding 10 (in cube data units) could be calculated with:: result = precip_cube.collapsed('ensemble_member', iris.analysis.COUNT, function=lambda values: values > 10) -.. seealso:: The :func:`~iris.analysis.PROPORTION` aggregator. +This aggregator handles masked data and lazy data. + +See Also +-------- +PROPORTION : Aggregator instance. +Aggregator : Aggregator Class + + +""" -This aggregator handles masked data. +MAX_RUN = Aggregator( + None, + iris._lazy_data.non_lazy(_lazy_max_run), + units_func=lambda units, **kwargs: 1, + lazy_func=_build_dask_mdtol_function(_lazy_max_run), +) """ +An :class:`~iris.analysis.Aggregator` instance that finds the longest run of +:class:`~iris.cube.Cube` data occurrences that satisfy a particular criterion, +as defined by a user supplied *function*, along the given axis. + +Parameters +---------- +function : callable + A function which converts an array of data values into a corresponding array + of True/False values. + +Examples +-------- +The longest run of days with precipitation exceeding 10 (in cube data units) at +each grid location could be calculated with:: + + result = precip_cube.collapsed('time', iris.analysis.MAX_RUN, + function=lambda values: values > 10) + +This aggregator handles masked data, which it treats as interrupting a run, +and lazy data. + +""" +MAX_RUN.name = lambda: "max_run" # type: ignore[method-assign] -GMEAN = Aggregator('geometric_mean', scipy.stats.mstats.gmean) +GMEAN = Aggregator("geometric_mean", scipy.stats.mstats.gmean) """ An :class:`~iris.analysis.Aggregator` instance that calculates the geometric mean over a :class:`~iris.cube.Cube`, as computed by :func:`scipy.stats.mstats.gmean`. -**For example**: - +Examples +-------- To compute zonal geometric means over the *longitude* axis of a cube:: result = cube.collapsed('longitude', iris.analysis.GMEAN) -This aggregator handles masked data. +This aggregator handles masked data, but NOT lazy data. """ -HMEAN = Aggregator('harmonic_mean', scipy.stats.mstats.hmean) +HMEAN = Aggregator("harmonic_mean", scipy.stats.mstats.hmean) """ An :class:`~iris.analysis.Aggregator` instance that calculates the harmonic mean over a :class:`~iris.cube.Cube`, as computed by :func:`scipy.stats.mstats.hmean`. -**For example**: - +Examples +-------- To compute zonal harmonic mean over the *longitude* axis of a cube:: result = cube.collapsed('longitude', iris.analysis.HMEAN) @@ -1484,38 +1885,39 @@ def interp_order(length): The harmonic mean is only valid if all data values are greater than zero. -This aggregator handles masked data. +This aggregator handles masked data, but NOT lazy data. """ -MEAN = WeightedAggregator('mean', ma.average, - lazy_func=_build_dask_mdtol_function(da.ma.average)) +MEAN = WeightedAggregator( + "mean", ma.average, lazy_func=_build_dask_mdtol_function(da.ma.average) +) """ An :class:`~iris.analysis.Aggregator` instance that calculates the mean over a :class:`~iris.cube.Cube`, as computed by :func:`numpy.ma.average`. -Additional kwargs associated with the use of this aggregator: - -* mdtol (float): +Parameters +---------- +mdtol : float, optional Tolerance of missing data. The value returned in each element of the returned array will be masked if the fraction of masked data contributing to that element exceeds mdtol. This fraction is calculated based on the number of masked elements. 
mdtol=0 means no missing data is tolerated while mdtol=1 means the resulting element will be masked if and only if all the contributing elements are masked. Defaults to 1. -* weights (float ndarray): +weights : float ndarray, optional Weights matching the shape of the cube or the length of the window for rolling window operations. Note that, latitude/longitude area weights can be calculated using :func:`iris.analysis.cartography.area_weights`. -* returned (boolean): +returned : bool, optional Set this to True to indicate that the collapsed weights are to be returned along with the collapsed data. Defaults to False. -**For example**: - +Examples +-------- To compute zonal means over the *longitude* axis of a cube:: result = cube.collapsed('longitude', iris.analysis.MEAN) @@ -1537,60 +1939,60 @@ def interp_order(length): """ -MEDIAN = Aggregator('median', ma.median) +MEDIAN = Aggregator("median", ma.median) """ An :class:`~iris.analysis.Aggregator` instance that calculates the median over a :class:`~iris.cube.Cube`, as computed by :func:`numpy.ma.median`. -**For example**: - +Examples +-------- To compute zonal medians over the *longitude* axis of a cube:: result = cube.collapsed('longitude', iris.analysis.MEDIAN) -This aggregator handles masked data. + +This aggregator handles masked data, but NOT lazy data. For lazy aggregation, +please try :obj:`~.PERCENTILE`. """ -MIN = Aggregator('minimum', ma.min, - lazy_func=_build_dask_mdtol_function(da.min)) +MIN = Aggregator("minimum", ma.min, lazy_func=_build_dask_mdtol_function(da.min)) """ An :class:`~iris.analysis.Aggregator` instance that calculates the minimum over a :class:`~iris.cube.Cube`, as computed by :func:`numpy.ma.min`. -**For example**: - +Examples +-------- To compute zonal minimums over the *longitude* axis of a cube:: result = cube.collapsed('longitude', iris.analysis.MIN) -This aggregator handles masked data. +This aggregator handles masked data and lazy data. """ -MAX = Aggregator('maximum', ma.max, - lazy_func=_build_dask_mdtol_function(da.max)) +MAX = Aggregator("maximum", ma.max, lazy_func=_build_dask_mdtol_function(da.max)) """ An :class:`~iris.analysis.Aggregator` instance that calculates the maximum over a :class:`~iris.cube.Cube`, as computed by :func:`numpy.ma.max`. -**For example**: - +Examples +-------- To compute zonal maximums over the *longitude* axis of a cube:: result = cube.collapsed('longitude', iris.analysis.MAX) -This aggregator handles masked data. +This aggregator handles masked data and lazy data. """ -PEAK = Aggregator('peak', _peak) +PEAK = Aggregator("peak", _peak) """ An :class:`~iris.analysis.Aggregator` instance that calculates the peak value derived from a spline interpolation over a @@ -1605,65 +2007,78 @@ def interp_order(length): If multiple coordinates are specified, then the peak calculations are performed individually, in sequence, for each coordinate specified. -**For example**: - +Examples +-------- To compute the peak over the *time* axis of a cube:: result = cube.collapsed('time', iris.analysis.PEAK) -This aggregator handles masked data. +This aggregator handles masked data but NOT lazy data. """ -PERCENTILE = PercentileAggregator(alphap=1, betap=1) +PERCENTILE = PercentileAggregator() """ -An :class:`~iris.analysis.PercentileAggregator` instance that calculates the +A :class:`~iris.analysis.PercentileAggregator` instance that calculates the percentile over a :class:`~iris.cube.Cube`, as computed by -:func:`scipy.stats.mstats.mquantiles`. 
- -**Required** kwargs associated with the use of this aggregator: +:func:`scipy.stats.mstats.mquantiles` (default) or :func:`numpy.percentile` (if +``fast_percentile_method`` is True). -* percent (float or sequence of floats): +Parameters +---------- +percent : float or sequence of floats Percentile rank/s at which to extract value/s. - -Additional kwargs associated with the use of this aggregator: - -* alphap (float): +alphap : float, default=1 Plotting positions parameter, see :func:`scipy.stats.mstats.mquantiles`. - Defaults to 1. -* betap (float): +betap : float, default=1 Plotting positions parameter, see :func:`scipy.stats.mstats.mquantiles`. - Defaults to 1. - -**For example**: - +fast_percentile_method : bool, default=False + When set to True, uses :func:`numpy.percentile` method as a faster + alternative to the :func:`scipy.stats.mstats.mquantiles` method. An + exception is raised if the data are masked and the missing data tolerance + is not 0. +**kwargs : dict, optional + Passed to :func:`scipy.stats.mstats.mquantiles` or :func:`numpy.percentile`. + +Examples +-------- To compute the 10th and 90th percentile over *time*:: result = cube.collapsed('time', iris.analysis.PERCENTILE, percent=[10, 90]) -This aggregator handles masked data. +This aggregator handles masked data and lazy data. + +.. note:: + + Performance of this aggregator on lazy data is particularly sensitive to + the dask array chunking, so it may be useful to test with various chunk + sizes for a given application. Any chunking along the dimensions to be + aggregated is removed by the aggregator prior to calculating the + percentiles. """ -PROPORTION = Aggregator('proportion', - _proportion, - units_func=lambda units: 1) +PROPORTION = Aggregator( + "proportion", + _proportion, + units_func=lambda units, **kwargs: 1, +) """ An :class:`~iris.analysis.Aggregator` instance that calculates the proportion, as a fraction, of :class:`~iris.cube.Cube` data occurrences that satisfy a particular criterion, as defined by a user supplied *function*. -**Required** kwargs associated with the use of this aggregator: - -* function (callable): +Parameters +---------- +function : callable A function which converts an array of data values into a corresponding array of True/False values. -**For example**: - +Examples +-------- To compute the probability of precipitation exceeding 10 (in cube data units) across *ensemble members* could be calculated with:: @@ -1678,51 +2093,57 @@ def interp_order(length): .. seealso:: The :func:`~iris.analysis.COUNT` aggregator. -This aggregator handles masked data. +This aggregator handles masked data, but NOT lazy data. """ -RMS = WeightedAggregator('root mean square', _rms, - lazy_func=_build_dask_mdtol_function(_lazy_rms)) +RMS = WeightedAggregator( + "root mean square", _rms, lazy_func=_build_dask_mdtol_function(_lazy_rms) +) """ An :class:`~iris.analysis.Aggregator` instance that calculates the root mean square over a :class:`~iris.cube.Cube`, as computed by ((x0**2 + x1**2 + ... + xN-1**2) / N) ** 0.5. -Additional kwargs associated with the use of this aggregator: +Parameters +---------- -* weights (float ndarray): +weights : array-like, optional Weights matching the shape of the cube or the length of the window for rolling window operations. The weights are applied to the squares when taking the mean. 
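For concreteness, the weighted root mean square computed by ``_rms`` is ``sqrt(average(x**2, weights=w))``; a small worked sketch with illustrative values::

    import numpy as np
    import numpy.ma as ma

    x = np.array([1.0, 2.0, 3.0])
    w = np.array([1.0, 1.0, 2.0])
    # (1*1 + 1*4 + 2*9) / (1 + 1 + 2) = 5.75
    rms = np.sqrt(ma.average(x**2, weights=w))  # sqrt(5.75), about 2.398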
-**For example**:
-
+Examples
+--------
 To compute the zonal root mean square over the *longitude* axis of a cube::

     result = cube.collapsed('longitude', iris.analysis.RMS)

-This aggregator handles masked data.
+This aggregator handles masked data and lazy data.

 """

-STD_DEV = Aggregator('standard_deviation', ma.std, ddof=1,
-                     lazy_func=_build_dask_mdtol_function(da.std))
+STD_DEV = Aggregator(
+    "standard_deviation",
+    ma.std,
+    ddof=1,
+    lazy_func=_build_dask_mdtol_function(da.std),
+)
 """
 An :class:`~iris.analysis.Aggregator` instance that calculates
 the standard deviation over a :class:`~iris.cube.Cube`, as
 computed by :func:`numpy.ma.std`.

-Additional kwargs associated with the use of this aggregator:
-
-* ddof (integer):
+Parameters
+----------
+ddof : int, optional
     Delta degrees of freedom. The divisor used in calculations is N - ddof,
     where N represents the number of elements. Defaults to 1.

-**For example**:
-
+Examples
+--------
 To compute zonal standard deviations over the *longitude* axis of a cube::

     result = cube.collapsed('longitude', iris.analysis.STD_DEV)

@@ -1733,32 +2154,36 @@ def interp_order(length):

 ..
note:: - Lazy operation is supported, via :func:`dask.array.nanvar`. + Lazy operation is supported, via :func:`dask.array.var`. -This aggregator handles masked data. +This aggregator handles masked data and lazy data. """ @@ -1814,35 +2241,33 @@ def interp_order(length): An :class:`~iris.analysis.WeightedPercentileAggregator` instance that calculates the weighted percentile over a :class:`~iris.cube.Cube`. -**Required** kwargs associated with the use of this aggregator: - -* percent (float or sequence of floats): +Parameters +---------- +percent : float or sequence of floats Percentile rank/s at which to extract value/s. - -* weights (float ndarray): +weights : float ndarray Weights matching the shape of the cube or the length of the window for rolling window operations. Note that, latitude/longitude area weights can be calculated using :func:`iris.analysis.cartography.area_weights`. - -Additional kwargs associated with the use of this aggregator: - -* returned (boolean): +returned : bool, optional Set this to True to indicate that the collapsed weights are to be returned along with the collapsed data. Defaults to False. - -* kind (string or int): +kind : str or int, optional Specifies the kind of interpolation used, see :func:`scipy.interpolate.interp1d` Defaults to "linear", which is - equivalent to alphap=0.5, betap=0.5 in `iris.analysis.PERCENTILE` + equivalent to alphap=0.5, betap=0.5 in :data:`~iris.analysis.PERCENTILE` + +Notes +------ +This function does not maintain laziness when called; it realises data. +See more at :doc:`/userguide/real_and_lazy_data`. """ -class _Groupby(object): - """ - Convenience class to determine group slices over one or more group-by - coordinates. +class _Groupby: + """Determine group slices over one or more group-by coordinates. Generate the coordinate slices for the groups and calculate the new group-by coordinates and the new shared coordinates given the @@ -1860,33 +2285,39 @@ class _Groupby(object): group-by analysis. """ - def __init__(self, groupby_coords, shared_coords=None): - """ - Determine the group slices over the group-by coordinates. - - Args: - * groupby_coords (list :class:`iris.coords.Coord` instances): + def __init__( + self, + groupby_coords: Iterable[AuxCoord | DimCoord], + shared_coords: Optional[Iterable[tuple[AuxCoord | DimCoord, int]]] = None, + climatological: bool = False, + ) -> None: + """Determine the group slices over the group-by coordinates. + + Parameters + ---------- + groupby_coords : list of :class:`iris.coords.Coord` One or more coordinates from the same axis over which to group-by. - - Kwargs: - - * shared_coords (list of (:class:`iris.coords.Coord`, `int`) pairs): + shared_coords : list of (:class:`iris.coords.Coord`, `int`) pairs One or more coordinates (including multidimensional coordinates) that share the same group-by coordinate axis. The `int` identifies which dimension of the coord is on the group-by coordinate axis. + climatological : bool, default=False + Indicates whether the output is expected to be climatological. For + any aggregated time coord(s), this causes the climatological flag to + be set and the point for each cell to equal its first bound, thereby + preserving the time of year. """ #: Group-by and shared coordinates that have been grouped. 
- self.coords = [] - self._groupby_coords = [] - self._shared_coords = [] - self._slices_by_key = OrderedDict() + self.coords: list[AuxCoord | DimCoord] = [] + self._groupby_coords: list[AuxCoord | DimCoord] = [] + self._shared_coords: list[tuple[AuxCoord | DimCoord, int]] = [] + self._groupby_indices: list[tuple[int, ...]] = [] self._stop = None # Ensure group-by coordinates are iterable. if not isinstance(groupby_coords, Iterable): - raise TypeError('groupby_coords must be a ' - '`collections.Iterable` type.') + raise TypeError("groupby_coords must be a `collections.Iterable` type.") # Add valid group-by coordinates. for coord in groupby_coords: @@ -1896,155 +2327,116 @@ def __init__(self, groupby_coords, shared_coords=None): if shared_coords is not None: # Ensure shared coordinates are iterable. if not isinstance(shared_coords, Iterable): - raise TypeError('shared_coords must be a ' - '`collections.Iterable` type.') + raise TypeError("shared_coords must be a `collections.Iterable` type.") # Add valid shared coordinates. for coord, dim in shared_coords: self._add_shared_coord(coord, dim) - def _add_groupby_coord(self, coord): + # Aggregation is climatological in nature + self.climatological = climatological + + # Stores mapping from original cube coords to new ones, as metadata may + # not match + self.coord_replacement_mapping: list[ + tuple[AuxCoord | DimCoord, AuxCoord | DimCoord] + ] = [] + + def _add_groupby_coord(self, coord: AuxCoord | DimCoord) -> None: if coord.ndim != 1: raise iris.exceptions.CoordinateMultiDimError(coord) if self._stop is None: self._stop = coord.shape[0] if coord.shape[0] != self._stop: - raise ValueError('Group-by coordinates have different lengths.') + raise ValueError("Group-by coordinates have different lengths.") self._groupby_coords.append(coord) - def _add_shared_coord(self, coord, dim): + def _add_shared_coord(self, coord: AuxCoord | DimCoord, dim: int) -> None: if coord.shape[dim] != self._stop and self._stop is not None: - raise ValueError('Shared coordinates have different lengths.') + raise ValueError("Shared coordinates have different lengths.") self._shared_coords.append((coord, dim)) - def group(self): - """ - Calculate the groups and associated slices over one or more group-by - coordinates. + def group(self) -> list[tuple[int, ...]]: + """Calculate groups and associated slices over one or more group-by coordinates. Also creates new group-by and shared coordinates given the calculated group slices. - Returns: - A generator of the coordinate group slices. - - """ - if self._groupby_coords: - if not self._slices_by_key: - items = [] - groups = [] - - for coord in self._groupby_coords: - groups.append(iris.coords._GroupIterator(coord.points)) - items.append(next(groups[-1])) - - # Construct the group slice for each group over the group-by - # coordinates. Keep constructing until all group-by coordinate - # groups are exhausted. - while any([item is not None for item in items]): - # Determine the extent (start, stop) of the group given - # each current group-by coordinate group. - start = max([item.groupby_slice.start for item in items - if item is not None]) - stop = min([item.groupby_slice.stop for item in items - if item is not None]) - # Construct composite group key for the group using the - # start value from each group-by coordinate. - key = tuple([coord.points[start] for coord - in self._groupby_coords]) - # Associate group slice with group key within the ordered - # dictionary. 
- self._slices_by_key.setdefault(key, []).append(slice(start, - stop)) - # Prepare for the next group slice construction over the - # group-by coordinates. - for item_index, item in enumerate(items): - if item is None: - continue - # Get coordinate current group slice. - groupby_slice = item.groupby_slice - # Determine whether coordinate has spanned all its - # groups i.e. its full length - # or whether we need to get the coordinates next group. - if groupby_slice.stop == self._stop: - # This coordinate has exhausted all its groups, - # so remove it. - items[item_index] = None - elif groupby_slice.stop == stop: - # The current group of this coordinate is - # exhausted, so get the next one. - items[item_index] = next(groups[item_index]) - - # Merge multiple slices together into one tuple. - self._slice_merge() - # Calculate the new group-by coordinates. - self._compute_groupby_coords() - # Calculate the new shared coordinates. - self._compute_shared_coords() - # Generate the group-by slices/groups. - for groupby_slice in six.itervalues(self._slices_by_key): - yield groupby_slice - - return - - def _slice_merge(self): - """ - Merge multiple slices into one tuple and collapse items from - containing list. - - """ - # Iterate over the ordered dictionary in order to reduce - # multiple slices into a single tuple and collapse - # all items from containing list. - for key, groupby_slices in six.iteritems(self._slices_by_key): - if len(groupby_slices) > 1: - # Compress multiple slices into tuple representation. - groupby_indicies = [] - - for groupby_slice in groupby_slices: - groupby_indicies.extend(range(groupby_slice.start, - groupby_slice.stop)) - - self._slices_by_key[key] = tuple(groupby_indicies) - else: - # Remove single inner slice from list. - self._slices_by_key[key] = groupby_slices[0] - - def _compute_groupby_coords(self): + Returns + ------- + A list of the coordinate group slices. + + """ + if not self._groupby_indices: + # Construct the group indices for each group over the group-by + # coordinates. Keep constructing until all group-by coordinate + # groups are exhausted. + + def group_iterator(points): + start = 0 + for _, group in itertools.groupby(points): + stop = sum((1 for _ in group), start) + yield slice(start, stop) + start = stop + + groups = [group_iterator(c.points) for c in self._groupby_coords] + groupby_slices = [next(group) for group in groups] + indices_by_key: dict[tuple[Union[Number, str], ...], list[int]] = {} + while any(s is not None for s in groupby_slices): + # Determine the extent (start, stop) of the group given + # each current group-by coordinate group. + start = max(s.start for s in groupby_slices if s is not None) + stop = min(s.stop for s in groupby_slices if s is not None) + # Construct composite group key for the group using the + # start value from each group-by coordinate. + key = tuple(coord.points[start] for coord in self._groupby_coords) + # Associate group slice with group key within the ordered + # dictionary. + indices_by_key.setdefault(key, []).extend(range(start, stop)) + # Prepare for the next group slice construction over the + # group-by coordinates. + for index, groupby_slice in enumerate(groupby_slices): + if groupby_slice is None: + continue + # Determine whether coordinate has spanned all its + # groups i.e. its full length + # or whether we need to get the coordinates next group. + if groupby_slice.stop == self._stop: + # This coordinate has exhausted all its groups, + # so remove it. 
+ groupby_slices[index] = None + elif groupby_slice.stop == stop: + # The current group of this coordinate is + # exhausted, so get the next one. + groupby_slices[index] = next(groups[index]) + + # Cache the indices + self._groupby_indices = [tuple(i) for i in indices_by_key.values()] + # Calculate the new group-by coordinates. + self._compute_groupby_coords() + # Calculate the new shared coordinates. + self._compute_shared_coords() + + # Return the group-by indices/groups. + return self._groupby_indices + + def _compute_groupby_coords(self) -> None: """Create new group-by coordinates given the group slices.""" - - groupby_slice = [] - - # Iterate over the ordered dictionary in order to construct - # a group-by slice that samples the first element from each group. - for key_slice in six.itervalues(self._slices_by_key): - if isinstance(key_slice, tuple): - groupby_slice.append(key_slice[0]) - else: - groupby_slice.append(key_slice.start) - - groupby_slice = np.array(groupby_slice) + # Construct a group-by slice that samples the first element from each + # group. + groupby_slice = np.array([i[0] for i in self._groupby_indices]) # Create new group-by coordinates from the group-by slice. self.coords = [coord[groupby_slice] for coord in self._groupby_coords] - def _compute_shared_coords(self): + def _compute_shared_coords(self) -> None: """Create the new shared coordinates given the group slices.""" - - groupby_bounds = [] - - # Iterate over the ordered dictionary in order to construct - # a list of tuple group boundary indexes. - for key_slice in six.itervalues(self._slices_by_key): - if isinstance(key_slice, tuple): - groupby_bounds.append((key_slice[0], key_slice[-1])) - else: - groupby_bounds.append((key_slice.start, key_slice.stop-1)) - - # Create new shared bounded coordinates. for coord, dim in self._shared_coords: - if coord.points.dtype.kind in 'SU': + climatological_coord = ( + self.climatological and coord.units.is_time_reference() + ) + if coord.points.dtype.kind in "SU": if coord.bounds is None: - new_points = [] + new_points_list = [] new_bounds = None # np.apply_along_axis does not work with str.join, so we # need to loop through the array directly. First move axis @@ -2052,106 +2444,131 @@ def _compute_shared_coords(self): work_arr = np.moveaxis(coord.points, dim, -1) shape = work_arr.shape work_shape = (-1, shape[-1]) - new_shape = (len(self),) + new_shape: tuple[int, ...] = (len(self),) if coord.ndim > 1: new_shape += shape[:-1] work_arr = work_arr.reshape(work_shape) - for key_slice in six.itervalues(self._slices_by_key): - if isinstance(key_slice, slice): - indices = key_slice.indices( - coord.points.shape[dim]) - key_slice = range(*indices) - + for indices in self._groupby_indices: for arr in work_arr: - new_points.append('|'.join(arr.take(key_slice))) + new_points_list.append("|".join(arr.take(indices))) # Reinstate flattened dimensions. Aggregated dim now leads. - new_points = np.array(new_points).reshape(new_shape) + new_points = np.array(new_points_list).reshape(new_shape) # Move aggregated dimension back to position it started in. new_points = np.moveaxis(new_points, 0, dim) else: - msg = ('collapsing the bounded string coordinate {0!r}' - ' is not supported'.format(coord.name())) + msg = ( + "collapsing the bounded string coordinate" + f" {coord.name()!r} is not supported" + ) raise ValueError(msg) else: - new_bounds = [] + new_bounds_list = [] + if coord.has_bounds(): + # Derive new coord's bounds from bounds. 
+ item = coord.bounds + maxmin_axis: Union[int, tuple[int, int]] = (dim, -1) + first_choices = coord.bounds.take(0, -1) + last_choices = coord.bounds.take(1, -1) + + else: + # Derive new coord's bounds from points. + item = coord.points + maxmin_axis = dim + first_choices = last_choices = coord.points + + # Check whether item is monotonic along the dimension of interest. + deltas = np.diff(item, 1, dim) + monotonic = np.all(deltas >= 0) or np.all(deltas <= 0) # Construct list of coordinate group boundary pairs. - for start, stop in groupby_bounds: - if coord.has_bounds(): - # Collapse group bounds into bounds. - if (getattr(coord, 'circular', False) and - (stop + 1) == coord.shape[dim]): - new_bounds.append( - [coord.bounds.take(start, dim).take(0, -1), - coord.bounds.take(0, dim).take(0, -1) + - coord.units.modulus]) - else: - new_bounds.append( - [coord.bounds.take(start, dim).take(0, -1), - coord.bounds.take(stop, dim).take(1, -1)]) - else: - # Collapse group points into bounds. - if (getattr(coord, 'circular', False) and - (stop + 1) == len(coord.points)): - new_bounds.append([coord.points.take(start, dim), - coord.points.take(0, dim) + - coord.units.modulus]) + if monotonic: + # Use first and last bound or point for new bounds. + for indices in self._groupby_indices: + start, stop = indices[0], indices[-1] + if ( + getattr(coord, "circular", False) + and (stop + 1) == self._stop + ): + new_bounds_list.append( + [ + first_choices.take(start, dim), + first_choices.take(0, dim) + coord.units.modulus, + ] + ) else: - new_bounds.append([coord.points.take(start, dim), - coord.points.take(stop, dim)]) + new_bounds_list.append( + [ + first_choices.take(start, dim), + last_choices.take(stop, dim), + ] + ) + else: + # Use min and max bound or point for new bounds. + for indices in self._groupby_indices: + item_slice = item.take(indices, dim) + new_bounds_list.append( + [ + item_slice.min(axis=maxmin_axis), + item_slice.max(axis=maxmin_axis), + ] + ) # Bounds needs to be an array with the length 2 start-stop # dimension last, and the aggregated dimension back in its # original position. - new_bounds = np.moveaxis( - np.array(new_bounds), (0, 1), (dim, -1)) + new_bounds = np.moveaxis(np.array(new_bounds_list), (0, 1), (dim, -1)) # Now create the new bounded group shared coordinate. try: - new_points = new_bounds.mean(-1) + if climatological_coord: + # Use the first bound as the point + new_points = new_bounds[..., 0] + else: + new_points = new_bounds.mean(-1) except TypeError: - msg = 'The {0!r} coordinate on the collapsing dimension' \ - ' cannot be collapsed.'.format(coord.name()) + msg = ( + f"The {coord.name()!r} coordinate on the collapsing" + " dimension cannot be collapsed." 
+ ) raise ValueError(msg) try: - self.coords.append(coord.copy(points=new_points, - bounds=new_bounds)) + new_coord = coord.copy(points=new_points, bounds=new_bounds) except ValueError: # non monotonic points/bounds - self.coords.append(iris.coords.AuxCoord.from_coord(coord).copy( - points=new_points, bounds=new_bounds)) + new_coord = iris.coords.AuxCoord.from_coord(coord).copy( + points=new_points, bounds=new_bounds + ) - def __len__(self): - """Calculate the number of groups given the group-by coordinates.""" + if climatological_coord: + new_coord.climatological = True + self.coord_replacement_mapping.append((coord, new_coord)) - if self._slices_by_key: - value = len(self._slices_by_key) - else: - value = len([s for s in self.group()]) + self.coords.append(new_coord) - return value + def __len__(self) -> int: + """Calculate the number of groups given the group-by coordinates.""" + return len(self.group()) - def __repr__(self): + def __repr__(self) -> str: groupby_coords = [coord.name() for coord in self._groupby_coords] - - if self._shared_coords_by_name: - shared_coords = [coord.name() for coord in self._shared_coords] - shared_string = ', shared_coords=%r)' % shared_coords - else: - shared_string = ')' - - return '%s(%r%s' % (self.__class__.__name__, groupby_coords, - shared_string) + shared_coords = [coord.name() for coord, _ in self._shared_coords] + return ( + f"{self.__class__.__name__}({groupby_coords!r}" + f", shared_coords={shared_coords!r})" + ) def clear_phenomenon_identity(cube): - """ - Helper function to clear the standard_name, attributes, and - cell_methods of a cube. + """Help to clear the standard_name, attributes and cell_methods of a cube. + + Notes + ----- + This function maintains laziness when called; it does not realise data. + See more at :doc:`/userguide/real_and_lazy_data`. """ cube.rename(None) @@ -2165,59 +2582,92 @@ def clear_phenomenon_identity(cube): # ############################################################################### -class Linear(object): - """ - This class describes the linear interpolation and regridding scheme for - interpolating or regridding over one or more orthogonal coordinates, + +class Interpolator(Protocol): + def __call__( # noqa: E704 # ruff formatting conflicts with flake8 + self, + sample_points: Sequence[np.typing.ArrayLike], + collapse_scalar: bool, + ) -> iris.cube.Cube: ... + + +class InterpolationScheme(Protocol): + def interpolator( # noqa: E704 # ruff formatting conflicts with flake8 + self, + cube: iris.cube.Cube, + coords: AuxCoord | DimCoord | str, + ) -> Interpolator: ... + + +class Regridder(Protocol): + def __call__( # noqa: E704 # ruff formatting conflicts with flake8 + self, + src: iris.cube.Cube, + ) -> iris.cube.Cube: ... + + +class RegriddingScheme(Protocol): + def regridder( # noqa: E704 # ruff formatting conflicts with flake8 + self, + src_grid: iris.cube.Cube, + target_grid: iris.cube.Cube, + ) -> Regridder: ... + + +class Linear: + """Describes the linear interpolation and regridding scheme. + + Use for interpolating or regridding over one or more orthogonal coordinates, typically for use with :meth:`iris.cube.Cube.interpolate()` or :meth:`iris.cube.Cube.regrid()`. """ - LINEAR_EXTRAPOLATION_MODES = list(EXTRAPOLATION_MODES.keys()) + ['linear'] + LINEAR_EXTRAPOLATION_MODES = list(EXTRAPOLATION_MODES.keys()) + ["linear"] - def __init__(self, extrapolation_mode='linear'): - """ - Linear interpolation and regridding scheme suitable for interpolating - or regridding over one or more orthogonal coordinates. 
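Typical use of the scheme defined below, as a sketch assuming the standard Iris sample data (the file name is illustrative)::

    import iris
    from iris.analysis import Linear

    cube = iris.load_cube(iris.sample_data_path("air_temp.pp"))
    # Interpolate onto new latitudes; extrapolation defaults to 'linear'.
    result = cube.interpolate([("latitude", [40.0, 50.0])], Linear())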
+    def __init__(self, extrapolation_mode="linear"):
+        """Linear interpolation and regridding scheme.

-        Kwargs:
+        Suitable for interpolating or regridding over one or more orthogonal
+        coordinates.

-        * extrapolation_mode:
+        Parameters
+        ----------
+        extrapolation_mode : str
             Must be one of the following strings:

-            * 'extrapolate' or 'linear' - The extrapolation points
-              will be calculated by extending the gradient of the
-              closest two points.
-            * 'nan' - The extrapolation points will be be set to NaN.
-            * 'error' - A ValueError exception will be raised, notifying an
-              attempt to extrapolate.
-            * 'mask' - The extrapolation points will always be masked, even
-              if the source data is not a MaskedArray.
-            * 'nanmask' - If the source data is a MaskedArray the
-              extrapolation points will be masked. Otherwise they will be
-              set to NaN.
-
-            The default mode of extrapolation is 'linear'.
+            * 'extrapolate' or 'linear' - The extrapolation points
+              will be calculated by extending the gradient of the
+              closest two points.
+            * 'nan' - The extrapolation points will be set to NaN.
+            * 'error' - A ValueError exception will be raised, notifying an
+              attempt to extrapolate.
+            * 'mask' - The extrapolation points will always be masked, even
+              if the source data is not a MaskedArray.
+            * 'nanmask' - If the source data is a MaskedArray the
+              extrapolation points will be masked. Otherwise they will be
+              set to NaN.
+            * The default mode of extrapolation is 'linear'.

         """
         if extrapolation_mode not in self.LINEAR_EXTRAPOLATION_MODES:
-            msg = 'Extrapolation mode {!r} not supported.'
+            msg = "Extrapolation mode {!r} not supported."
             raise ValueError(msg.format(extrapolation_mode))
         self.extrapolation_mode = extrapolation_mode

     def __repr__(self):
-        return 'Linear({!r})'.format(self.extrapolation_mode)
+        return "Linear({!r})".format(self.extrapolation_mode)

     def _normalised_extrapolation_mode(self):
         mode = self.extrapolation_mode
-        if mode == 'linear':
-            mode = 'extrapolate'
+        if mode == "linear":
+            mode = "extrapolate"
         return mode

     def interpolator(self, cube, coords):
-        """
-        Creates a linear interpolator to perform interpolation over the
+        """Create a linear interpolator to perform interpolation.
+
+        Create a linear interpolator to perform interpolation over the
         given :class:`~iris.cube.Cube` specified by the dimensions of
         the given coordinates.

@@ -2226,40 +2676,44 @@ def interpolator(self, cube, coords):
         constructing your own interpolator is preferable. These are detailed
         in the :ref:`user guide `.

-        Args:
-
-        * cube:
+        Parameters
+        ----------
+        cube : :class:`iris.cube.Cube`
             The source :class:`iris.cube.Cube` to be interpolated.
-        * coords:
+        coords : str or :class:`iris.coords.Coord`, or iterable of these
             The names or coordinate instances that are to be
             interpolated over.

-        Returns:
-            A callable with the interface:
-
-                `callable(sample_points, collapse_scalar=True)`
-
-            where `sample_points` is a sequence containing an array of values
+        Returns
+        -------
+        A callable with the interface ``callable(sample_points, collapse_scalar=True)``
+            Where ``sample_points`` is a sequence containing an array of values
             for each of the coordinates passed to this method, and
-            `collapse_scalar` determines whether to remove length one
+            ``collapse_scalar`` determines whether to remove length one
             dimensions in the result cube caused by scalar values in
-            `sample_points`.
+            ``sample_points``.
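A sketch of using the returned interpolator directly (names illustrative; ``cube`` is assumed to have latitude and longitude coordinates)::

    from iris.analysis import Linear

    interp = Linear().interpolator(cube, ["latitude", "longitude"])
    # The two value arrays define a 2x2 grid of sample points.
    new_cube = interp([[40.0, 50.0], [0.0, 10.0]], collapse_scalar=False)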
+
+        The N arrays of values within ``sample_points`` will be used to
+        create an N-d grid of points that will then be sampled (rather than
+        just N points).

         The values for coordinates that correspond to date/times
         may optionally be supplied as datetime.datetime or
         cftime.datetime instances.

         For example, for the callable returned by:
-        `Linear().interpolator(cube, ['latitude', 'longitude'])`,
+        ``Linear().interpolator(cube, ['latitude', 'longitude'])``,
         sample_points must have the form
-        `[new_lat_values, new_lon_values]`.
+        ``[new_lat_values, new_lon_values]``.

         """
-        return RectilinearInterpolator(cube, coords, 'linear',
-                                       self._normalised_extrapolation_mode())
+        return RectilinearInterpolator(
+            cube, coords, "linear", self._normalised_extrapolation_mode()
+        )

     def regridder(self, src_grid, target_grid):
-        """
+        """Create a linear regridder to perform regridding.
+
         Creates a linear regridder to perform regridding from the source
         grid to the target grid.

@@ -2268,43 +2722,52 @@ def regridder(self, src_grid, target_grid):
         constructing your own regridder is preferable. These are detailed
         in the :ref:`user guide `.

-        Args:
+        Supports lazy regridding. Any
+        `chunks `__
+        in horizontal dimensions will be combined before regridding.

-        * src_grid:
+        Parameters
+        ----------
+        src_grid : :class:`~iris.cube.Cube`
             The :class:`~iris.cube.Cube` defining the source grid.
-        * target_grid:
+        target_grid : :class:`~iris.cube.Cube`
             The :class:`~iris.cube.Cube` defining the target grid.

-        Returns:
-            A callable with the interface:
-
-                `callable(cube)`
-
-            where `cube` is a cube with the same grid as `src_grid`
-            that is to be regridded to the `target_grid`.
+        Returns
+        -------
+        A callable with the interface ``callable(cube)``
+            Where `cube` is a cube with the same grid as ``src_grid``
+            that is to be regridded to the ``target_grid``.

         """
-        return RectilinearRegridder(src_grid, target_grid, 'linear',
-                                    self._normalised_extrapolation_mode())
+        return RectilinearRegridder(
+            src_grid,
+            target_grid,
+            "linear",
+            self._normalised_extrapolation_mode(),
+        )


-class AreaWeighted(object):
-    """
+class AreaWeighted:
+    """Describes an area-weighted regridding scheme for regridding.
+
     This class describes an area-weighted regridding scheme for regridding
     between 'ordinary' horizontal grids with separated X and Y coordinates
     in a common coordinate system.
+
+    Typically for use with :meth:`iris.cube.Cube.regrid()`.

     """

     def __init__(self, mdtol=1):
-        """
-        Area-weighted regridding scheme suitable for regridding between
-        different orthogonal XY grids in the same coordinate system.
+        """Area-weighted regridding scheme.

-        Kwargs:
+        Suitable for regridding between different orthogonal XY grids in the
+        same coordinate system.

-        * mdtol (float):
+        Parameters
+        ----------
+        mdtol : float
             Tolerance of missing data. The value returned in each element of
             the returned array will be masked if the fraction of missing data
             exceeds mdtol. This fraction is calculated based on the area of
@@ -2313,23 +2776,24 @@ def __init__(self, mdtol=1):
             will be masked if and only if all the overlapping elements of
             the source grid are masked. Defaults to 1.

-        .. Note:
-            Both sourge and target cubes must have an XY grid defined by
-            separate X and Y dimensions with dimension coordinates.
-            All of the XY dimension coordinates must also be bounded, and have
-            the same cooordinate system.
+        .. note::
+            Both source and target cubes must have an XY grid defined by
+            separate X and Y dimensions with dimension coordinates.
+            All of the XY dimension coordinates must also be bounded, and have
+            the same coordinate system.

         """
         if not (0 <= mdtol <= 1):
-            msg = 'Value for mdtol must be in range 0 - 1, got {}.'
+            msg = "Value for mdtol must be in range 0 - 1, got {}."
             raise ValueError(msg.format(mdtol))
         self.mdtol = mdtol

     def __repr__(self):
-        return 'AreaWeighted(mdtol={})'.format(self.mdtol)
+        return "AreaWeighted(mdtol={})".format(self.mdtol)

     def regridder(self, src_grid_cube, target_grid_cube):
-        """
+        """Create an area-weighted regridder to perform regridding.
+
         Creates an area-weighted regridder to perform regridding from the
         source grid to the target grid.

@@ -2338,68 +2802,74 @@ def regridder(self, src_grid_cube, target_grid_cube):
         constructing your own regridder is preferable. These are detailed
         in the :ref:`user guide `.

-        Args:
+        Supports lazy regridding. Any
+        `chunks `__
+        in horizontal dimensions will be combined before regridding.

-        * src_grid_cube:
+        Parameters
+        ----------
+        src_grid_cube : :class:`~iris.cube.Cube`
             The :class:`~iris.cube.Cube` defining the source grid.
-        * target_grid_cube:
+        target_grid_cube : :class:`~iris.cube.Cube`
             The :class:`~iris.cube.Cube` defining the target grid.

-        Returns:
-            A callable with the interface:
-
-                `callable(cube)`
-
-            where `cube` is a cube with the same grid as `src_grid_cube`
+        Returns
+        -------
+        A callable with the interface ``callable(cube)``
+            Where `cube` is a cube with the same grid as `src_grid_cube`
             that is to be regridded to the grid of `target_grid_cube`.

         """
-        return AreaWeightedRegridder(src_grid_cube, target_grid_cube,
-                                     mdtol=self.mdtol)
+        return AreaWeightedRegridder(src_grid_cube, target_grid_cube, mdtol=self.mdtol)


-class Nearest(object):
-    """
-    This class describes the nearest-neighbour interpolation and regridding
-    scheme for interpolating or regridding over one or more orthogonal
+class Nearest:
+    """Describe the nearest-neighbour interpolation and regridding scheme.
+
+    For interpolating or regridding over one or more orthogonal
     coordinates, typically for use with :meth:`iris.cube.Cube.interpolate()`
     or :meth:`iris.cube.Cube.regrid()`.

     """

-    def __init__(self, extrapolation_mode='extrapolate'):
-        """
-        Nearest-neighbour interpolation and regridding scheme suitable for
-        interpolating or regridding over one or more orthogonal coordinates.
-        Kwargs:
-
-        * extrapolation_mode:
-            Must be one of the following strings:
-
-            * 'extrapolate' - The extrapolation points will take their
-              value from the nearest source point.
-            * 'nan' - The extrapolation points will be be set to NaN.
-            * 'error' - A ValueError exception will be raised, notifying an
-              attempt to extrapolate.
-            * 'mask' - The extrapolation points will always be masked, even
-              if the source data is not a MaskedArray.
-            * 'nanmask' - If the source data is a MaskedArray the
-              extrapolation points will be masked. Otherwise they will be
-              set to NaN.
-
-            The default mode of extrapolation is 'extrapolate'.
+    def __init__(self, extrapolation_mode="extrapolate"):
+        """Nearest-neighbour interpolation and regridding scheme.
+
+        Suitable for interpolating or regridding over one or more orthogonal
+        coordinates.
+
+        Parameters
+        ----------
+        extrapolation_mode : str, optional
+            Must be one of the following strings:
+
+            * 'extrapolate' - The extrapolation points will take their
+              value from the nearest source point.
+            * 'nan' - The extrapolation points will be set to NaN.
+            * 'error' - A ValueError exception will be raised, notifying an
+              attempt to extrapolate.
+ * 'mask' - The extrapolation points will always be masked, even + if the source data is not a MaskedArray. + * 'nanmask' - If the source data is a MaskedArray the + extrapolation points will be masked. Otherwise they will be + set to NaN. + + The default mode of extrapolation is 'extrapolate'. """ if extrapolation_mode not in EXTRAPOLATION_MODES: - msg = 'Extrapolation mode {!r} not supported.' + msg = "Extrapolation mode {!r} not supported." raise ValueError(msg.format(extrapolation_mode)) self.extrapolation_mode = extrapolation_mode def __repr__(self): - return 'Nearest({!r})'.format(self.extrapolation_mode) + return "Nearest({!r})".format(self.extrapolation_mode) def interpolator(self, cube, coords): - """ + """Perform interpolation over the given :class:`~iris.cube.Cube`. + Creates a nearest-neighbour interpolator to perform interpolation over the given :class:`~iris.cube.Cube` specified by the dimensions of the specified coordinates. @@ -2409,40 +2879,38 @@ def interpolator(self, cube, coords): constructing your own interpolator is preferable. These are detailed in the :ref:`user guide `. - Args: - - * cube: + Parameters + ---------- + cube : The source :class:`iris.cube.Cube` to be interpolated. - * coords: + coords : The names or coordinate instances that are to be interpolated over. - Returns: - A callable with the interface: - - `callable(sample_points, collapse_scalar=True)` - - where `sample_points` is a sequence containing an array of values + Returns + ------- + A callable with the interface ``callable(sample_points, collapse_scalar=True)`` + Where ``sample_points`` is a sequence containing an array of values for each of the coordinates passed to this method, and ``collapse_scalar`` determines whether to remove length one dimensions in the result cube caused by scalar values in - `sample_points`. + ``sample_points``. The values for coordinates that correspond to date/times may optionally be supplied as datetime.datetime or cftime.datetime instances. For example, for the callable returned by: - `Nearest().interpolator(cube, ['latitude', 'longitude'])`, + ``Nearest().interpolator(cube, ['latitude', 'longitude'])``, sample_points must have the form - `[new_lat_values, new_lon_values]`. + ``[new_lat_values, new_lon_values]``. """ - return RectilinearInterpolator(cube, coords, 'nearest', - self.extrapolation_mode) + return RectilinearInterpolator(cube, coords, "nearest", self.extrapolation_mode) def regridder(self, src_grid, target_grid): - """ + """Create a nearest-neighbour regridder. + Creates a nearest-neighbour regridder to perform regridding from the source grid to the target grid. @@ -2451,28 +2919,32 @@ def regridder(self, src_grid, target_grid): constructing your own regridder is preferable. These are detailed in the :ref:`user guide `. - Args: + Supports lazy regridding. Any + `chunks `__ + in horizontal dimensions will be combined before regridding. - * src_grid: + Parameters + ---------- + src_grid : :class:`~iris.cube.Cube` The :class:`~iris.cube.Cube` defining the source grid. - * target_grid: + target_grid : :class:`~iris.cube.Cube` The :class:`~iris.cube.Cube` defining the target grid.
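For illustration, a minimal usage sketch of the scheme above, assuming two suitable rectilinear cubes; the file names and cube variables here are hypothetical, while `Cube.regrid` and the `regridder` factory are the entry points documented in this diff:

    import iris
    from iris.analysis import Nearest

    # Hypothetical inputs: any two cubes with 1D latitude/longitude dim coords.
    src_cube = iris.load_cube("source.nc")
    target_cube = iris.load_cube("target.nc")

    # One-off regrid via the convenience method ...
    result = src_cube.regrid(target_cube, Nearest(extrapolation_mode="mask"))

    # ... or build the regridder once and reuse it for cubes on the same grid.
    regridder = Nearest("mask").regridder(src_cube, target_cube)
    result = regridder(src_cube)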
- Returns: - A callable with the interface: - - `callable(cube)` - - where `cube` is a cube with the same grid as `src_grid` + Returns + ------- + A callable with the interface `callable(cube)` + Where `cube` is a cube with the same grid as `src_grid` that is to be regridded to the `target_grid`. """ - return RectilinearRegridder(src_grid, target_grid, 'nearest', - self.extrapolation_mode) + return RectilinearRegridder( + src_grid, target_grid, "nearest", self.extrapolation_mode + ) -class UnstructuredNearest(object): - """ +class UnstructuredNearest: + """Nearest-neighbour regridding scheme. + This is a nearest-neighbour regridding scheme for regridding data whose horizontal (X- and Y-axis) coordinates are mapped to the *same* dimensions, rather than being orthogonal on independent dimensions. @@ -2491,40 +2963,34 @@ class UnstructuredNearest(object): must be. Otherwise, the corresponding X and Y coordinates must have the same units in the source and grid cubes. - .. Note:: + .. note:: Currently only supports regridding, not interpolation. - .. Note:: - This scheme performs essentially the same job as - :class:`iris.experimental.regrid.ProjectedUnstructuredNearest`. - That scheme is faster, but only works well on data in a limited - region of the globe, covered by a specified projection. - This approach is more rigorously correct and can be applied to global - datasets. - """ + # Note: the argument requirements are simply those of the underlying # regridder class, # :class:`iris.analysis.trajectory.UnstructuredNearestNeigbourRegridder`. def __init__(self): - """ - Nearest-neighbour interpolation and regridding scheme suitable for - interpolating or regridding from un-gridded data such as trajectories - or other data where the X and Y coordinates share the same dimensions. + """Nearest-neighbour interpolation and regridding scheme. + + Suitable for interpolating or regridding from un-gridded data such as + trajectories or other data where the X and Y coordinates share the same + dimensions. """ pass def __repr__(self): - return 'UnstructuredNearest()' + return "UnstructuredNearest()" # TODO: add interpolator usage # def interpolator(self, cube): def regridder(self, src_cube, target_grid): - """ - Creates a nearest-neighbour regridder, of the - :class:`~iris.analysis.trajectory.UnstructuredNearestNeigbourRegridder` + """Create a nearest-neighbour regridder. + + Using the :class:`~iris.analysis.trajectory.UnstructuredNearestNeigbourRegridder` type, to perform regridding from the source grid to the target grid. This can then be applied to any source data with the same structure as @@ -2535,37 +3001,39 @@ def regridder(self, src_cube, target_grid): constructing your own regridder is preferable. These are detailed in the :ref:`user guide `. - Args: + Does not support lazy regridding. - * src_cube: + Parameters + ---------- + src_cube : :class:`~iris.cube.Cube` The :class:`~iris.cube.Cube` defining the source grid. The X and Y coordinates can have any shape, but must be mapped over the same cube dimensions. - - * target_grid: + target_grid : :class:`~iris.cube.Cube` The :class:`~iris.cube.Cube` defining the target grid. The X and Y coordinates must be one-dimensional dimension coordinates, mapped to different dimensions. All other cube components are ignored. 
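As a sketch of how this scheme is applied to trajectory-style data (cube names and file names hypothetical); the source cube's X and Y coordinates must share dimensions, as described above:

    import iris
    from iris.analysis import UnstructuredNearest

    # Hypothetical inputs: 'ungridded' has X and Y coords mapped to the same
    # dimension(s); 'grid_cube' has 1D X and Y dimension coordinates.
    ungridded = iris.load_cube("trajectory.nc")
    grid_cube = iris.load_cube("grid.nc")

    result = ungridded.regrid(grid_cube, UnstructuredNearest())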
- Returns: - A callable with the interface: - - `callable(cube)` - - where `cube` is a cube with the same grid as `src_cube` + Returns + ------- + A callable with the interface `callable(cube)` + Where `cube` is a cube with the same grid as `src_cube` that is to be regridded to the `target_grid`. """ - from iris.analysis.trajectory import \ - UnstructuredNearestNeigbourRegridder + from iris.analysis.trajectory import UnstructuredNearestNeigbourRegridder + return UnstructuredNearestNeigbourRegridder(src_cube, target_grid) -class PointInCell(object): - """ - This class describes the point-in-cell regridding scheme for use - typically with :meth:`iris.cube.Cube.regrid()`. +class PointInCell: + """Describes the point-in-cell regridding scheme. + + For use typically with :meth:`iris.cube.Cube.regrid()`. + + Each result datapoint is an average over all source points that fall inside + that (bounded) target cell. The PointInCell regridder can regrid data from a source grid of any dimensionality and in any coordinate system. @@ -2581,25 +3049,31 @@ class PointInCell(object): coord_system. """ + def __init__(self, weights=None): - """ - Point-in-cell regridding scheme suitable for regridding over one - or more orthogonal coordinates. + """Point-in-cell regridding scheme. + + Point-in-cell regridding scheme suitable for regridding from a source + cube with X and Y coordinates all on the same dimensions, to a target + cube with bounded X and Y coordinates on separate X and Y dimensions. - Optional Args: + Each result datapoint is an average over all source points that fall + inside that (bounded) target cell. - * weights: - A :class:`numpy.ndarray` instance that defines the weights - for the grid cells of the source grid. Must have the same shape - as the data of the source grid. + Parameters + ---------- + weights : :class:`numpy.ndarray`, optional + Defines the weights for the grid cells of the source grid. Must + have the same shape as the data of the source grid. If unspecified, equal weighting is assumed. """ self.weights = weights def regridder(self, src_grid, target_grid): - """ - Creates a point-in-cell regridder to perform regridding from the + """Create a point-in-cell regridder. + + Create a point-in-cell regridder to perform regridding from the source grid to the target grid. Typically you should use :meth:`iris.cube.Cube.regrid` for @@ -2607,19 +3081,19 @@ def regridder(self, src_grid, target_grid): constructing your own regridder is preferable. These are detailed in the :ref:`user guide `. - Args: + Does not support lazy regridding. - * src_grid: + Parameters + ---------- + src_grid : The :class:`~iris.cube.Cube` defining the source grid. - * target_grid: + target_grid : The :class:`~iris.cube.Cube` defining the target grid. - Returns: - A callable with the interface: - - `callable(cube)` - - where `cube` is a cube with the same grid as `src_grid` + Returns + ------- + A callable with the interface `callable(cube)` + Where `cube` is a cube with the same grid as `src_grid` that is to be regridded to the `target_grid`. """ diff --git a/lib/iris/analysis/_area_weighted.py b/lib/iris/analysis/_area_weighted.py index 2c484e4626..a25a21bb47 100644 --- a/lib/iris/analysis/_area_weighted.py +++ b/lib/iris/analysis/_area_weighted.py @@ -1,51 +1,35 @@ -# (C) British Crown Copyright 2014 - 2016, Met Office +# Copyright Iris contributors # -# This file is part of Iris. 
-# -# Iris is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the -# Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Iris is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with Iris. If not, see . - -from __future__ import (absolute_import, division, print_function) -from six.moves import (filter, input, map, range, zip) # noqa +# This file is part of Iris and is released under the BSD license. +# See LICENSE in the root of the repository for full licensing details. +import functools +import cf_units import numpy as np +import numpy.ma as ma +from scipy.sparse import csr_array +from iris._lazy_data import map_complete_blocks from iris.analysis._interpolation import get_xy_dim_coords, snapshot_grid -import iris -import iris.experimental.regrid as eregrid +from iris.analysis._regrid import RectilinearRegridder, _create_cube +import iris.analysis.cartography +import iris.coord_systems +from iris.util import _meshgrid -class AreaWeightedRegridder(object): - """ - This class provides support for performing area-weighted regridding. - - """ +class AreaWeightedRegridder: + """Provide support for performing area-weighted regridding.""" def __init__(self, src_grid_cube, target_grid_cube, mdtol=1): - """ - Create an area-weighted regridder for conversions between the source - and target grids. + """Create an area-weighted regridder for conversions between the source and target grids. - Args: - - * src_grid_cube: + Parameters + ---------- + src_grid_cube : :class:`~iris.cube.Cube` The :class:`~iris.cube.Cube` providing the source grid. - * target_grid_cube: + target_grid_cube : :class:`~iris.cube.Cube` The :class:`~iris.cube.Cube` providing the target grid. - - Kwargs: - - * mdtol (float): + mdtol : float, default=1 Tolerance of missing data. The value returned in each element of the returned array will be masked if the fraction of masked data exceeds mdtol. mdtol=0 means no missing data is tolerated while @@ -53,61 +37,660 @@ def __init__(self, src_grid_cube, target_grid_cube, mdtol=1): if all the contributing elements of data are masked. Defaults to 1. - .. Note:: + Notes + ----- + .. note:: - Both sourge and target cubes must have an XY grid defined by + Both source and target cubes must have an XY grid defined by separate X and Y dimensions with dimension coordinates. All of the XY dimension coordinates must also be bounded, and have - the same cooordinate system. + the same coordinate system. """ - # Snapshot the state of the cubes to ensure that the regridder is - # impervious to external changes to the original source cubes. + # Snapshot the state of the source cube to ensure that the regridder is + # impervious to external changes to the original cubes. self._src_grid = snapshot_grid(src_grid_cube) - self._target_grid = snapshot_grid(target_grid_cube) + # Missing data tolerance. if not (0 <= mdtol <= 1): - msg = 'Value for mdtol must be in range 0 - 1, got {}.' + msg = "Value for mdtol must be in range 0 - 1, got {}." 
raise ValueError(msg.format(mdtol)) self._mdtol = mdtol - # The need for an actual Cube is an implementation quirk caused by the - # current usage of the experimental regrid function. - self._target_grid_cube_cache = None - - @property - def _target_grid_cube(self): - if self._target_grid_cube_cache is None: - x, y = self._target_grid - data = np.empty((y.points.size, x.points.size)) - cube = iris.cube.Cube(data) - cube.add_dim_coord(y, 0) - cube.add_dim_coord(x, 1) - self._target_grid_cube_cache = cube - return self._target_grid_cube_cache + # Store regridding information + _regrid_info = _regrid_area_weighted_rectilinear_src_and_grid__prepare( + src_grid_cube, target_grid_cube + ) + ( + src_x, + src_y, + src_x_dim, + src_y_dim, + self.grid_x, + self.grid_y, + self.meshgrid_x, + self.meshgrid_y, + self.weights, + ) = _regrid_info def __call__(self, cube): - """ - Regrid this :class:`~iris.cube.Cube` onto the target grid of - this :class:`AreaWeightedRegridder`. + """Regrid :class:`~iris.cube.Cube` onto target grid :class:`AreaWeightedRegridder`. The given cube must be defined with the same grid as the source grid used to create this :class:`AreaWeightedRegridder`. - Args: + If the source cube has lazy data, the returned cube will also + have lazy data. - * cube: + Parameters + ---------- + cube : :class:`~iris.cube.Cube` A :class:`~iris.cube.Cube` to be regridded. - Returns: + Returns + ------- + :class:`~iris.cube.Cube` A cube defined with the horizontal dimensions of the target and the other dimensions from this cube. The data values of this cube will be converted to values on the new grid using area-weighted regridding. + Notes + ----- + .. note:: + + If the source cube has lazy data, + `chunks `__ + in the horizontal dimensions will be combined before regridding. + """ - if get_xy_dim_coords(cube) != self._src_grid: - raise ValueError('The given cube is not defined on the same ' - 'source grid as this regridder.') - return eregrid.regrid_area_weighted_rectilinear_src_and_grid( - cube, self._target_grid_cube, mdtol=self._mdtol) + src_x, src_y = get_xy_dim_coords(cube) + if (src_x, src_y) != self._src_grid: + raise ValueError( + "The given cube is not defined on the same " + "source grid as this regridder." + ) + src_x_dim = cube.coord_dims(src_x)[0] + src_y_dim = cube.coord_dims(src_y)[0] + _regrid_info = ( + src_x, + src_y, + src_x_dim, + src_y_dim, + self.grid_x, + self.grid_y, + self.meshgrid_x, + self.meshgrid_y, + self.weights, + ) + return _regrid_area_weighted_rectilinear_src_and_grid__perform( + cube, _regrid_info, mdtol=self._mdtol + ) + + +# +# Support routines, all originally in iris.experimental.regrid +# + + +def _get_xy_coords(cube): + """Return the x and y coordinates from a cube. + + This function will preferentially return a pair of dimension + coordinates (if there are more than one potential x or y dimension + coordinates a ValueError will be raised). If the cube does not have + a pair of x and y dimension coordinates it will return 1D auxiliary + coordinates (including scalars). If there is not one and only one set + of x and y auxiliary coordinates a ValueError will be raised. + + Having identified the x and y coordinates, the function checks that they + have equal coordinate systems and that they do not occupy the same + dimension on the cube. + + Parameters + ---------- + cube : :class:`iris.cube.Cube` + An instance of :class:`iris.cube.Cube`. + + Returns + ------- + tuple + A tuple containing the cube's x and y coordinates. 
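Because the weights are computed once at construction and only applied on each call, the intended reuse pattern looks roughly like this (a sketch; cube and file names are hypothetical, and every regridded cube must share the source grid):

    import iris
    from iris.analysis import AreaWeighted

    # Hypothetical inputs: bounded, rectilinear cubes on one coordinate system.
    src = iris.load_cube("src.nc")
    grid = iris.load_cube("grid.nc")

    # Construction runs the expensive 'prepare' step (sparse weights) ...
    regridder = AreaWeighted(mdtol=0.5).regridder(src, grid)

    # ... and each call only performs the cheap 'apply' step.
    for cube in iris.load("fields_on_src_grid.nc"):
        regridded = regridder(cube)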
+ + """ + # Look for a suitable dimension coords first. + x_coords = cube.coords(axis="x", dim_coords=True) + if not x_coords: + # If there is no x coord in dim_coords look for scalars or + # monotonic coords in aux_coords. + x_coords = [ + coord + for coord in cube.coords(axis="x", dim_coords=False) + if coord.ndim == 1 and coord.is_monotonic() + ] + if len(x_coords) != 1: + raise ValueError( + "Cube {!r} must contain a single 1D x coordinate.".format(cube.name()) + ) + x_coord = x_coords[0] + + # Look for a suitable dimension coords first. + y_coords = cube.coords(axis="y", dim_coords=True) + if not y_coords: + # If there is no y coord in dim_coords look for scalars or + # monotonic coords in aux_coords. + y_coords = [ + coord + for coord in cube.coords(axis="y", dim_coords=False) + if coord.ndim == 1 and coord.is_monotonic() + ] + if len(y_coords) != 1: + raise ValueError( + "Cube {!r} must contain a single 1D y coordinate.".format(cube.name()) + ) + y_coord = y_coords[0] + + if x_coord.coord_system != y_coord.coord_system: + raise ValueError( + "The cube's x ({!r}) and y ({!r}) " + "coordinates must have the same coordinate " + "system.".format(x_coord.name(), y_coord.name()) + ) + + # The x and y coordinates must describe different dimensions + # or be scalar coords. + x_dims = cube.coord_dims(x_coord) + x_dim = None + if x_dims: + x_dim = x_dims[0] + + y_dims = cube.coord_dims(y_coord) + y_dim = None + if y_dims: + y_dim = y_dims[0] + + if x_dim is not None and y_dim == x_dim: + raise ValueError( + "The cube's x and y coords must not describe the same data dimension." + ) + + return x_coord, y_coord + + +def _get_bounds_in_units(coord, units, dtype): + """Return a copy of coord's bounds in the specified units and dtype. + + Return as contiguous bounds. + + """ + # The bounds are cast to dtype before conversion to prevent issues when + # mixing float32 and float64 types. + return coord.units.convert(coord.contiguous_bounds().astype(dtype), units).astype( + dtype + ) + + +def _regrid_area_weighted_rectilinear_src_and_grid__prepare(src_cube, grid_cube): + """First (setup) part of 'regrid_area_weighted_rectilinear_src_and_grid'. + + Check inputs and calculate related info. The 'regrid info' returned + can be re-used over many 2d slices. + + """ + # Get the 1d monotonic (or scalar) src and grid coordinates. + src_x, src_y = _get_xy_coords(src_cube) + grid_x, grid_y = _get_xy_coords(grid_cube) + + # Condition 1: All x and y coordinates must have contiguous bounds to + # define areas. + if ( + not src_x.is_contiguous() + or not src_y.is_contiguous() + or not grid_x.is_contiguous() + or not grid_y.is_contiguous() + ): + raise ValueError( + "The horizontal grid coordinates of both the source " + "and grid cubes must have contiguous bounds." + ) + + # Condition 2: Everything must have the same coordinate system. + src_cs = src_x.coord_system + grid_cs = grid_x.coord_system + if src_cs != grid_cs: + raise ValueError( + "The horizontal grid coordinates of both the source " + "and grid cubes must have the same coordinate " + "system." + ) + + # Condition 3: cannot create vector coords from scalars. 
+ src_x_dims = src_cube.coord_dims(src_x) + src_x_dim = None + if src_x_dims: + src_x_dim = src_x_dims[0] + src_y_dims = src_cube.coord_dims(src_y) + src_y_dim = None + if src_y_dims: + src_y_dim = src_y_dims[0] + if ( + src_x_dim is None + and grid_x.shape[0] != 1 + or src_y_dim is None + and grid_y.shape[0] != 1 + ): + raise ValueError( + "The horizontal grid coordinates of source cube " + "includes scalar coordinates, but the new grid does " + "not. The new grid must not require additional data " + "dimensions to be created." + ) + + # Determine whether to calculate flat or spherical areas. + # Don't only rely on coord system as it may be None. + spherical = ( + isinstance( + src_cs, + (iris.coord_systems.GeogCS, iris.coord_systems.RotatedGeogCS), + ) + or src_x.units == "degrees" + or src_x.units == "radians" + ) + + # Get src and grid bounds in the same units. + x_units = cf_units.Unit("radians") if spherical else src_x.units + y_units = cf_units.Unit("radians") if spherical else src_y.units + + # Operate in highest precision. + src_dtype = np.promote_types(src_x.bounds.dtype, src_y.bounds.dtype) + grid_dtype = np.promote_types(grid_x.bounds.dtype, grid_y.bounds.dtype) + dtype = np.promote_types(src_dtype, grid_dtype) + + src_x_bounds = _get_bounds_in_units(src_x, x_units, dtype) + src_y_bounds = _get_bounds_in_units(src_y, y_units, dtype) + grid_x_bounds = _get_bounds_in_units(grid_x, x_units, dtype) + grid_y_bounds = _get_bounds_in_units(grid_y, y_units, dtype) + + # Create 2d meshgrids as required by _create_cube func. + meshgrid_x, meshgrid_y = _meshgrid(grid_x.points, grid_y.points) + + # Wrapping of longitudes. + if spherical: + modulus = x_units.modulus + else: + modulus = None + + def _calculate_regrid_area_weighted_weights( + src_x_bounds, + src_y_bounds, + grid_x_bounds, + grid_y_bounds, + spherical, + modulus=None, + ): + """Return weights matrix to be used in regridding.""" + src_shape = (len(src_x_bounds) - 1, len(src_y_bounds) - 1) + tgt_shape = (len(grid_x_bounds) - 1, len(grid_y_bounds) - 1) + + if spherical: + # Changing the dtype here replicates old regridding behaviour. + dtype = np.float64 + src_x_bounds = src_x_bounds.astype(dtype) + src_y_bounds = src_y_bounds.astype(dtype) + grid_x_bounds = grid_x_bounds.astype(dtype) + grid_y_bounds = grid_y_bounds.astype(dtype) + + src_y_bounds = np.sin(src_y_bounds) + grid_y_bounds = np.sin(grid_y_bounds) + x_info = _get_coord_to_coord_matrix_info( + src_x_bounds, grid_x_bounds, circular=spherical, mod=modulus + ) + y_info = _get_coord_to_coord_matrix_info(src_y_bounds, grid_y_bounds) + weights_matrix = _combine_xy_weights(x_info, y_info, src_shape, tgt_shape) + return weights_matrix + + weights = _calculate_regrid_area_weighted_weights( + src_x_bounds, + src_y_bounds, + grid_x_bounds, + grid_y_bounds, + spherical, + modulus, + ) + return ( + src_x, + src_y, + src_x_dim, + src_y_dim, + grid_x, + grid_y, + meshgrid_x, + meshgrid_y, + weights, + ) + + +def _regrid_area_weighted_rectilinear_src_and_grid__perform( + src_cube, regrid_info, mdtol +): + """Second (regrid) part of 'regrid_area_weighted_rectilinear_src_and_grid'. + + Perform the prepared regrid calculation on a single 2d cube. 
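To see how the separable per-axis weights combine, here is a toy numeric sketch of the outer-product step used by _combine_xy_weights; the overlap fractions are invented for illustration:

    import numpy as np

    # Toy 1-D overlap fractions, as _get_coord_to_coord_matrix_info produces:
    # one target cell half-covered by each of two source cells, in x and in y.
    x_weight = np.array([0.5, 0.5])
    y_weight = np.array([0.5, 0.5])

    # Broadcasting y against x (an outer product) yields the 2-D area
    # fractions that become entries of the sparse weights matrix.
    xy_weight = y_weight[:, np.newaxis] * x_weight[np.newaxis, :]
    print(xy_weight.ravel())  # [0.25 0.25 0.25 0.25]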
+ + """ + ( + src_x, + src_y, + src_x_dim, + src_y_dim, + grid_x, + grid_y, + meshgrid_x, + meshgrid_y, + weights, + ) = regrid_info + + tgt_shape = (len(grid_y.points), len(grid_x.points)) + + new_data = map_complete_blocks( + src_cube, + _regrid_along_dims, + (src_y_dim, src_x_dim), + meshgrid_x.shape, + x_dim=src_x_dim, + y_dim=src_y_dim, + weights=weights, + tgt_shape=tgt_shape, + mdtol=mdtol, + ) + + # Wrap up the data as a Cube. + + _regrid_callback = functools.partial( + RectilinearRegridder._regrid, + src_x_coord=src_x, + src_y_coord=src_y, + sample_grid_x=meshgrid_x, + sample_grid_y=meshgrid_y, + ) + # TODO: investigate if an area weighted callback would be more appropriate. + # _regrid_callback = functools.partial( + # _regrid_along_dims, + # weights=weights, + # tgt_shape=tgt_shape, + # mdtol=mdtol, + # ) + + def regrid_callback(*args, **kwargs): + _data, dims = args + return _regrid_callback(_data, *dims, **kwargs) + + new_cube = _create_cube( + new_data, + src_cube, + [src_x_dim, src_y_dim], + [grid_x, grid_y], + 2, + regrid_callback, + ) + + # Slice out any length 1 dimensions. + indices = [slice(None, None)] * new_data.ndim + if src_x_dim is not None and new_cube.shape[src_x_dim] == 1: + indices[src_x_dim] = 0 + if src_y_dim is not None and new_cube.shape[src_y_dim] == 1: + indices[src_y_dim] = 0 + if 0 in indices: + new_cube = new_cube[tuple(indices)] + + return new_cube + + +def _get_coord_to_coord_matrix_info(src_bounds, tgt_bounds, circular=False, mod=None): + """First part of weight calculation. + + Calculate the weights contribution from a single pair of + coordinate bounds. Search for pairs of overlapping source and + target bounds and associate weights with them. + + Note: this assumes that the bounds are monotonic. + """ + # Calculate the number of cells represented by the bounds. + m = len(tgt_bounds) - 1 + n = len(src_bounds) - 1 + + # Ensure bounds are strictly increasing. + src_decreasing = src_bounds[0] > src_bounds[1] + tgt_decreasing = tgt_bounds[0] > tgt_bounds[1] + if src_decreasing: + src_bounds = src_bounds[::-1] + if tgt_decreasing: + tgt_bounds = tgt_bounds[::-1] + + if circular: + # For circular coordinates (e.g. longitude) account for source and + # target bounds which span different ranges (e.g. (-180, 180) vs + # (0, 360)). We ensure that all possible overlaps between source and + # target bounds are accounted for by including two copies of the + # source bounds, shifted appropriately by the modulus. + adjust = (tgt_bounds.min() - src_bounds.min()) // mod + src_bounds = src_bounds + (mod * adjust) + src_bounds = np.append(src_bounds, src_bounds + mod) + nn = (2 * n) + 1 + else: + nn = n + + # Before iterating through pairs of overlapping bounds, find an + # appropriate place to start iteration. Note that this assumes that + # the bounds are increasing. + i = max(np.searchsorted(tgt_bounds, src_bounds[0], side="right") - 1, 0) + j = max(np.searchsorted(src_bounds, tgt_bounds[0], side="right") - 1, 0) + + data = [] + rows = [] + cols = [] + + # Iterate through overlapping cells in the source and target bounds. + # For the sake of calculations, we keep track of the minimum value of + # the intersection of each cell. + floor = max(tgt_bounds[i], src_bounds[j]) + while i < m and j < nn: + # Record the current indices. + rows.append(i) + cols.append(j) + + # Determine the next indices and floor. 
+ if tgt_bounds[i + 1] < src_bounds[j + 1]: + next_floor = tgt_bounds[i + 1] + next_i = i + 1 + elif tgt_bounds[i + 1] == src_bounds[j + 1]: + next_floor = tgt_bounds[i + 1] + next_i = i + 1 + j += 1 + else: + next_floor = src_bounds[j + 1] + next_i = i + j += 1 + + # Calculate and record the weight for the current overlapping cells. + weight = (next_floor - floor) / (tgt_bounds[i + 1] - tgt_bounds[i]) + data.append(weight) + + # Update indices and floor + i = next_i + floor = next_floor + + data = np.array(data) + rows = np.array(rows) + cols = np.array(cols) + + if circular: + # Remove out of bounds points. When the source bounds were duplicated + # an "out of bounds" cell was introduced between the two copies. + oob = np.where(cols == n) + data = np.delete(data, oob) + rows = np.delete(rows, oob) + cols = np.delete(cols, oob) + + # Wrap indices. Since we duplicated the source bounds there may be + # indices which are greater than n which will need to be corrected. + cols = cols % (n + 1) + + # Correct indices which were flipped due to reversing decreasing bounds. + if src_decreasing: + cols = n - cols - 1 + if tgt_decreasing: + rows = m - rows - 1 + + return data, rows, cols + + +def _combine_xy_weights(x_info, y_info, src_shape, tgt_shape): + """Second part of weight calculation. + + Combine the weights contributions from both pairs of coordinate + bounds (i.e. the source/target pairs for the x and y coords). + Return the result as a sparse array. + """ + x_src, y_src = src_shape + x_tgt, y_tgt = tgt_shape + src_size = x_src * y_src + tgt_size = x_tgt * y_tgt + x_weight, x_rows, x_cols = x_info + y_weight, y_rows, y_cols = y_info + + # Regridding weights will be applied to a flattened (y, x) array. + # Weights and indices are constructed in a way to account for this. + # Weights of the combined matrix are constructed by broadcasting + # the x_weights and y_weights. The resulting array contains every + # combination of x weight and y weight. Then we flatten this array. + xy_weight = y_weight[:, np.newaxis] * x_weight[np.newaxis, :] + xy_weight = xy_weight.flatten() + + # Given the x index and y index associated with a weight, calculate + # the equivalent index in the flattened (y, x) array. + xy_rows = (y_rows[:, np.newaxis] * x_tgt) + x_rows[np.newaxis, :] + xy_rows = xy_rows.flatten() + xy_cols = (y_cols[:, np.newaxis] * x_src) + x_cols[np.newaxis, :] + xy_cols = xy_cols.flatten() + + # Create a sparse matrix for efficient weight application. + combined_weights = csr_array( + (xy_weight, (xy_rows, xy_cols)), shape=(tgt_size, src_size) + ) + return combined_weights + + +def _standard_regrid_no_masks(data, weights, tgt_shape): + """Regrid unmasked data to an unmasked result. + + Assumes that the first two dimensions are the x-y grid. + """ + # Reshape data to a form suitable for matrix multiplication. + extra_shape = data.shape[:-2] + data = data.reshape(-1, np.prod(data.shape[-2:])) + + # Apply regridding weights. + # The order of matrix multiplication is chosen to be consistent + # with existing regridding code. + result = data @ weights.T + + # Reshape result to a suitable form. + result = result.reshape(*(extra_shape + tgt_shape)) + return result + + +def _standard_regrid(data, weights, tgt_shape, mdtol): + """Regrid data and handle masks. + + Assumes that the first two dimensions are the x-y grid. + """ + # This is set to keep consistent with legacy behaviour. 
+ # This is likely to become switchable in the future, see: + # https://github.com/SciTools/iris/issues/5461 + oob_invalid = True + + data_shape = data.shape + if ma.is_masked(data): + unmasked = ~ma.getmaskarray(data) + # Calculate contribution from unmasked sources to each target point. + weight_sums = _standard_regrid_no_masks(unmasked, weights, tgt_shape) + else: + # If there are no masked points then all contributions will be + # from unmasked sources, so we can skip this calculation + weight_sums = np.ones(data_shape[:-2] + tgt_shape) + mdtol = max(mdtol, 1e-8) + tgt_mask = weight_sums > 1 - mdtol + # If out of bounds sources are treated the same as masked sources this + # will already have been calculated above, so we can skip this calculation. + if oob_invalid or not ma.is_masked(data): + # Calculate the proportion of each target cell which is covered by the + # source. For the sake of efficiency, this is calculated for a 2D slice + # which is then broadcast. + inbound_sums = _standard_regrid_no_masks( + np.ones(data_shape[-2:]), weights, tgt_shape + ) + if oob_invalid: + # Legacy behaviour, if the full area of a target cell does not lie + # in bounds it will be masked. + oob_mask = inbound_sums > 1 - 1e-8 + else: + # Note: this code is currently inaccessible. This code exists to lay + # the groundwork for future work which will make out of bounds + # behaviour switchable. + oob_mask = inbound_sums > 1 - mdtol + # Broadcast the mask to the shape of the full array + oob_slice = ((np.newaxis,) * len(data.shape[:-2])) + np.s_[:, :] + tgt_mask = tgt_mask * oob_mask[oob_slice] + + # Calculate normalisations. + normalisations = tgt_mask.astype(weight_sums.dtype) + normalisations[tgt_mask] /= weight_sums[tgt_mask] + + # Mask points in the result. + if ma.isMaskedArray(data): + # If the source is masked, the result should have a similar mask. + fill_value = data.fill_value + normalisations = ma.array(normalisations, mask=~tgt_mask, fill_value=fill_value) + elif np.any(~tgt_mask): + normalisations = ma.array(normalisations, mask=~tgt_mask) + + # Use input cube dtype or convert values to the smallest possible float + # dtype when necessary. + dtype = np.promote_types(data.dtype, np.float16) + + # Perform regridding on unmasked data. + result = _standard_regrid_no_masks(ma.filled(data, 0.0), weights, tgt_shape) + # Apply normalisations and masks to the regridded data. + result = result * normalisations + result = result.astype(dtype) + return result + + +def _regrid_along_dims(data, x_dim, y_dim, weights, tgt_shape, mdtol): + """Regrid data, handling masks and dimensions.""" + # Handle scalar coordinates. + # Note: scalar source coordinates are only handled when their + # corresponding target coordinate is also scalar. + num_scalar_dims = 0 + if x_dim is None: + num_scalar_dims += 1 + data = np.expand_dims(data, -1) + x_dim = -1 + if y_dim is None: + num_scalar_dims += 1 + data = np.expand_dims(data, -1) + y_dim = -1 + if num_scalar_dims == 2: + y_dim = -2 + + # Standard regridding expects the last two dimensions to belong + # to the y and x coordinate and will output as such. + # Axes are moved to account for an arbitrary dimension ordering. 
+ data = np.moveaxis(data, [y_dim, x_dim], [-2, -1]) + result = _standard_regrid(data, weights, tgt_shape, mdtol) + result = np.moveaxis(result, [-2, -1], [y_dim, x_dim]) + + for _ in range(num_scalar_dims): + result = np.squeeze(result, axis=-1) + return result diff --git a/lib/iris/analysis/_grid_angles.py b/lib/iris/analysis/_grid_angles.py index 90876840af..80b73d81d7 100644 --- a/lib/iris/analysis/_grid_angles.py +++ b/lib/iris/analysis/_grid_angles.py @@ -1,46 +1,33 @@ -# (C) British Crown Copyright 2010 - 2018, Met Office +# Copyright Iris contributors # -# This file is part of Iris. -# -# Iris is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the -# Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Iris is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with Iris. If not, see . -""" +# This file is part of Iris and is released under the BSD license. +# See LICENSE in the root of the repository for full licensing details. + +"""Implement vector rotation by angles. + Code to implement vector rotation by angles, and inferring gridcell angles from coordinate points and bounds. """ -from __future__ import (absolute_import, division, print_function) -from six.moves import (filter, input, map, range, zip) # noqa +import cartopy.crs as ccrs import numpy as np -import cartopy.crs as ccrs import iris def _3d_xyz_from_latlon(lon, lat): - """ - Return locations of (lon, lat) in 3D space. - - Args: + """Return locations of (lon, lat) in 3D space. - * lon, lat: (float array) + Parameters + ---------- + lon, lat : float array Arrays of longitudes and latitudes, in degrees. Both the same shape. - Returns: - - * xyz : (array, dtype=float64) + Returns + ------- + array of dtype=float64 Cartesian coordinates on a unit sphere. Shape is (3, ). The x / y / z coordinates are in xyz[0 / 1 / 2]. @@ -59,20 +46,19 @@ def _3d_xyz_from_latlon(lon, lat): def _latlon_from_xyz(xyz): - """ - Return arrays of lons+lats angles from xyz locations. - - Args: + """Return arrays of lons+lats angles from xyz locations. - * xyz: (array) + Parameters + ---------- + xyz : array Array of 3-D cartesian coordinates. Shape (3, ). - x / y / z values are in xyz[0 / 1 / 2], + x / y / z values are in xyz[0 / 1 / 2]. - Returns: - - * lonlat : (array) - longitude and latitude position angles, in degrees. + Returns + ------- + np.array + Longitude and latitude position angles, in degrees. Shape (2, ). The longitudes / latitudes are in lonlat[0 / 1]. @@ -84,15 +70,19 @@ def _latlon_from_xyz(xyz): def _angle(p, q, r): - """ + """Estimate grid-angles to true-Eastward direction. + Estimate grid-angles to true-Eastward direction from positions in the same grid row, but at increasing column (grid-Eastward) positions. {P, Q, R} are locations of consecutive points in the same grid row. - These could be successive points in a single grid, - e.g. {T(i-1,j), T(i,j), T(i+1,j)} - or a mixture of U/V and T gridpoints if row positions are aligned, - e.g. {v(i,j), f(i,j), v(i+1,j)}. 
+ These could be successive points in a single grid, e.g.:: + + {T(i-1,j), T(i,j), T(i+1,j)} + + or a mixture of U/V and T gridpoints if row positions are aligned, e.g.:: + + {v(i,j), f(i,j), v(i+1,j)}. Method: @@ -102,7 +92,7 @@ def _angle(p, q, r): Discriminate between +/- angles by comparing latitudes of P and R. Return NaN where any P-->R are zero. - .. NOTE:: + .. note:: This method assumes that the vector PR is parallel to the surface at the longitude of Q, as it uses the length of PR as the basis for @@ -114,17 +104,17 @@ def _angle(p, q, r): gridcell-orientation-angle arrays found in files output by the CICE model, which presumably uses an equivalent calculation. - Args: - - * p, q, r : (float array) + Parameters + ---------- + p, q, r : float array Arrays of angles, in degrees. All the same shape. Shape is (2, ). Longitudes / latitudes are in array[0 / 1]. - Returns: - - * angle : (float array) + Returns + ------- + float array Grid angles relative to true-East, in degrees. Positive when grid-East is anticlockwise from true-East. Shape is same as . @@ -148,9 +138,8 @@ def _angle(p, q, r): return np.rad2deg(psi) -def gridcell_angles(x, y=None, cell_angle_boundpoints='mid-lhs, mid-rhs'): - """ - Calculate gridcell orientations for an arbitrary 2-dimensional grid. +def gridcell_angles(x, y=None, cell_angle_boundpoints="mid-lhs, mid-rhs"): + """Calculate gridcell orientations for an arbitrary 2-dimensional grid. The input grid is defined by two 2-dimensional coordinate arrays with the same dimensions (ny, nx), specifying the geolocations of a 2D mesh. @@ -160,36 +149,30 @@ def gridcell_angles(x, y=None, cell_angle_boundpoints='mid-lhs, mid-rhs'): connected by wraparound. Input can be either two arrays, two coordinates, or a single cube - containing two suitable coordinates identified with the 'x' and'y' axes. + containing two suitable coordinates identified with the 'x' and 'y' axes. - Args: + The inputs (x [,y]) can be different, see the parameters section. - The inputs (x [,y]) can be any of the folliwing : - - * x (:class:`~iris.cube.Cube`): - a grid cube with 2D X and Y coordinates, identified by 'axis'. + Parameters + ---------- + x : :class:`~iris.cube.Cube` + A grid cube with 2D X and Y coordinates, identified by 'axis'. The coordinates must be 2-dimensional with the same shape. The two dimensions represent grid dimensions in the order Y, then X. - - * x, y (:class:`~iris.coords.Coord`): + x, y : :class:`~iris.coords.Coord` X and Y coordinates, specifying grid locations on the globe. The coordinates must be 2-dimensional with the same shape. The two dimensions represent grid dimensions in the order Y, then X. If there is no coordinate system, they are assumed to be true longitudes and latitudes. Units must be convertible to 'degrees'. - - * x, y (2-dimensional arrays of same shape (ny, nx)): - longitude and latitude cell center locations, in degrees. + x, y : 2-dimensional arrays of same shape (ny, nx) + Longitude and latitude cell center locations, in degrees. The two dimensions represent grid dimensions in the order Y, then X. - - * x, y (3-dimensional arrays of same shape (ny, nx, 4)): - longitude and latitude cell bounds, in degrees. + x, y : 3-dimensional arrays of same shape (ny, nx, 4) + Longitude and latitude cell bounds, in degrees. The first two dimensions are grid dimensions in the order Y, then X. The last index maps cell corners anticlockwise from bottom-left.
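A quick sketch of the simplest call form, the 2-D points arrays, with invented toy values; the function is defined in this private module but is also available through iris.analysis.cartography:

    import numpy as np
    from iris.analysis.cartography import gridcell_angles

    # Toy (ny, nx) cell-centre locations, in degrees.
    lons, lats = np.meshgrid(np.linspace(0.0, 10.0, 5), np.linspace(45.0, 50.0, 4))

    # Returns a 2-D cube of grid-x orientation angles, in degrees.
    angles = gridcell_angles(lons, lats)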
- - Optional Args: - - * cell_angle_boundpoints (string): + cell_angle_boundpoints : str, default="mid-lhs, mid-rhs" Controls which gridcell bounds locations are used to calculate angles, if the inputs are bounds or bounded coordinates. Valid values are 'lower-left, lower-right', which takes the angle from @@ -197,68 +180,79 @@ def gridcell_angles(x, y=None, cell_angle_boundpoints='mid-lhs, mid-rhs'): takes the angle between the average of the left-hand and right-hand pairs of corners. The default is 'mid-lhs, mid-rhs'. - Returns: - - angles : (2-dimensional cube) - - Cube of angles of grid-x vector from true Eastward direction for - each gridcell, in degrees. - It also has "true" longitude and latitude coordinates, with no - coordinate system. - When the input has coords, then the output ones are identical if - the inputs are true-latlons, otherwise they are transformed - true-latlon versions. - When the input has bounded coords, then the output coords have - matching bounds and centrepoints (possibly transformed). - When the input is 2d arrays, or has unbounded coords, then the - output coords have matching points and no bounds. - When the input is 3d arrays, then the output coords have matching - bounds, and the centrepoints are an average of the 4 boundpoints. + Returns + ------- + 2-dimensional cube + Cube of angles of grid-x vector from true Eastward direction for + each gridcell, in degrees. + It also has "true" longitude and latitude coordinates, with no + coordinate system. + When the input has coords, then the output ones are identical if + the inputs are true-latlons, otherwise they are transformed + true-latlon versions. + When the input has bounded coords, then the output coords have + matching bounds and centrepoints (possibly transformed). + When the input is 2d arrays, or has unbounded coords, then the + output coords have matching points and no bounds. + When the input is 3d arrays, then the output coords have matching + bounds, and the centrepoints are an average of the 4 boundpoints. """ cube = None - if hasattr(x, 'add_aux_coord'): + if hasattr(x, "add_aux_coord"): # Passed a cube: extract 'x' and 'y' axis coordinates. cube = x # Save for later checking. - x, y = cube.coord(axis='x'), cube.coord(axis='y') + x, y = cube.coord(axis="x"), cube.coord(axis="y") # Now should have either 2 coords or 2 arrays. - if not hasattr(x, 'shape') or not hasattr(y, 'shape'): - msg = ('Inputs (x,y) must have array shape property.' - 'Got type(x)={} and type(y)={}.') + if not hasattr(x, "shape") or not hasattr(y, "shape"): + msg = ( + "Inputs (x,y) must have array shape property. " + "Got type(x)={} and type(y)={}." + ) raise ValueError(msg.format(type(x), type(y))) x_coord, y_coord = None, None - if hasattr(x, 'bounds') and hasattr(y, 'bounds'): + if hasattr(x, "bounds") and hasattr(y, "bounds"): # x and y are Coords. x_coord, y_coord = x.copy(), y.copy() # They must be angles : convert into degrees for coord in (x_coord, y_coord): - if not coord.units.is_convertible('degrees'): - msg = ('Input X and Y coordinates must have angular ' - 'units. Got units of "{!s}" and "{!s}".') + if not coord.units.is_convertible("degrees"): + msg = ( + "Input X and Y coordinates must have angular " + 'units. Got units of "{!s}" and "{!s}".' + ) raise ValueError(msg.format(x_coord.units, y_coord.units)) - coord.convert_units('degrees') + coord.convert_units("degrees") if x_coord.ndim != 2 or y_coord.ndim != 2: - msg = ('Coordinate inputs must have 2-dimensional shape. 
' - 'Got x-shape of {} and y-shape of {}.') + msg = ( + "Coordinate inputs must have 2-dimensional shape. " + "Got x-shape of {} and y-shape of {}." + ) raise ValueError(msg.format(x_coord.shape, y_coord.shape)) if x_coord.shape != y_coord.shape: - msg = ('Coordinate inputs must have same shape. ' - 'Got x-shape of {} and y-shape of {}.') + msg = ( + "Coordinate inputs must have same shape. " + "Got x-shape of {} and y-shape of {}." + ) raise ValueError(msg.format(x_coord.shape, y_coord.shape)) if cube: x_dims, y_dims = (cube.coord_dims(co) for co in (x, y)) if x_dims != y_dims: - msg = ('X and Y coordinates must have the same cube ' - 'dimensions. Got x-dims = {} and y-dims = {}.') + msg = ( + "X and Y coordinates must have the same cube " + "dimensions. Got x-dims = {} and y-dims = {}." + ) raise ValueError(msg.format(x_dims, y_dims)) cs = x_coord.coord_system if y_coord.coord_system != cs: - msg = ('Coordinate inputs must have same coordinate system. ' - 'Got x of {} and y of {}.') + msg = ( + "Coordinate inputs must have same coordinate system. " + "Got x of {} and y of {}." + ) raise ValueError(msg.format(cs, y_coord.coord_system)) # Base calculation on bounds if we have them, or points as a fallback. @@ -292,18 +286,24 @@ def transform_xy_arrays(x, y): xpts, ypts = transform_xy_arrays(xpts, ypts) xbds, ybds = transform_xy_arrays(xbds, ybds) x_coord = iris.coords.AuxCoord( - points=xpts, bounds=xbds, - standard_name='longitude', units='degrees') + points=xpts, + bounds=xbds, + standard_name="longitude", + units="degrees", + ) y_coord = iris.coords.AuxCoord( - points=ypts, bounds=ybds, - standard_name='latitude', units='degrees') + points=ypts, + bounds=ybds, + standard_name="latitude", + units="degrees", + ) - elif hasattr(x, 'bounds') or hasattr(y, 'bounds'): + elif hasattr(x, "bounds") or hasattr(y, "bounds"): # One was a Coord, and the other not ? - is_and_not = ('x', 'y') - if hasattr(y, 'bounds'): + is_and_not = ("x", "y") + if hasattr(y, "bounds"): is_and_not = reversed(is_and_not) - msg = 'Input {!r} is a Coordinate, but {!r} is not.' + msg = "Input {!r} is a Coordinate, but {!r} is not." raise ValueError(msg.format(*is_and_not)) # Now have either 2 points arrays (ny, nx) or 2 bounds arrays (ny, nx, 4). @@ -327,30 +327,32 @@ def transform_xy_arrays(x, y): rhs = np.roll(mid, -1, 2) if not x_coord: # Create coords for result cube : with no bounds. - y_coord = iris.coords.AuxCoord(x, standard_name='latitude', - units='degrees') - x_coord = iris.coords.AuxCoord(y, standard_name='longitude', - units='degrees') + y_coord = iris.coords.AuxCoord(x, standard_name="latitude", units="degrees") + x_coord = iris.coords.AuxCoord( + y, standard_name="longitude", units="degrees" + ) else: # Data is bounds arrays. # Use gridcell corners at different grid-x positions as references. # NOTE: so with bounds, we *don't* need full circular longitudes. xyz = _3d_xyz_from_latlon(x, y) # Support two different choices of reference points locations. 
- angle_boundpoints_vals = {'mid-lhs, mid-rhs': '03_to_12', - 'lower-left, lower-right': '0_to_1'} + angle_boundpoints_vals = { + "mid-lhs, mid-rhs": "03_to_12", + "lower-left, lower-right": "0_to_1", + } bounds_pos = angle_boundpoints_vals.get(cell_angle_boundpoints) - if bounds_pos == '0_to_1': + if bounds_pos == "0_to_1": lhs_xyz = xyz[..., 0] rhs_xyz = xyz[..., 1] - elif bounds_pos == '03_to_12': + elif bounds_pos == "03_to_12": lhs_xyz = 0.5 * (xyz[..., 0] + xyz[..., 3]) rhs_xyz = 0.5 * (xyz[..., 1] + xyz[..., 2]) else: - msg = ('unrecognised cell_angle_boundpoints of "{}", ' - 'must be one of {}') - raise ValueError(msg.format(cell_angle_boundpoints, - list(angle_boundpoints_vals.keys()))) + msg = 'unrecognised cell_angle_boundpoints of "{}", ' "must be one of {}" + raise ValueError( + msg.format(cell_angle_boundpoints, list(angle_boundpoints_vals.keys())) + ) if not x_coord: # Create bounded coords for result cube. # Use average of lhs+rhs points in 3d to get 'mid' points, @@ -359,11 +361,17 @@ def transform_xy_arrays(x, y): mid_latlons = _latlon_from_xyz(mid_xyz) # Create coords with given bounds, and averaged centrepoints. x_coord = iris.coords.AuxCoord( - points=mid_latlons[0], bounds=x, - standard_name='longitude', units='degrees') + points=mid_latlons[0], + bounds=x, + standard_name="longitude", + units="degrees", + ) y_coord = iris.coords.AuxCoord( - points=mid_latlons[1], bounds=y, - standard_name='latitude', units='degrees') + points=mid_latlons[1], + bounds=y, + standard_name="latitude", + units="degrees", + ) # Convert lhs and rhs points back to latlon form -- IN DEGREES ! lhs = _latlon_from_xyz(lhs_xyz) @@ -373,22 +381,20 @@ def transform_xy_arrays(x, y): # Do the angle calcs, and return as a suitable cube. angles = _angle(lhs, mid, rhs) - result = iris.cube.Cube(angles, - long_name='gridcell_angle_from_true_east', - units='degrees') + result = iris.cube.Cube( + angles, long_name="gridcell_angle_from_true_east", units="degrees" + ) result.add_aux_coord(x_coord, (0, 1)) result.add_aux_coord(y_coord, (0, 1)) return result -def rotate_grid_vectors(u_cube, v_cube, grid_angles_cube=None, - grid_angles_kwargs=None): - """ - Rotate distance vectors from grid-oriented to true-latlon-oriented. +def rotate_grid_vectors(u_cube, v_cube, grid_angles_cube=None, grid_angles_kwargs=None): + """Rotate distance vectors from grid-oriented to true-latlon-oriented. Can also rotate by arbitrary angles, if they are passed in. - .. Note:: + .. note:: This operation overlaps somewhat in function with :func:`iris.analysis.cartography.rotate_winds`. @@ -400,33 +406,32 @@ def rotate_grid_vectors(u_cube, v_cube, grid_angles_cube=None, complex meshes defined by two-dimensional coordinates, such as most ocean grids. - Args: - - * u_cube, v_cube : (cube) + Parameters + ---------- + u_cube, v_cube : cube Cubes of grid-u and grid-v vector components. Units should be differentials of true-distance, e.g. 'm/s'. - - Optional args: - - * grid_angles_cube : (cube) - gridcell orientation angles. + grid_angles_cube : cube, optional + Gridcell orientation angles. Units must be angular, i.e. can be converted to 'radians'. If not provided, grid angles are estimated from 'u_cube' using the :func:`gridcell_angles` method. - - * grid_angles_kwargs : (dict or None) + **grid_angles_kwargs : dict, optional Additional keyword args to be passed to the :func:`gridcell_angles` method, if it is used. - Returns: - - true_u, true_v : (cube) - Cubes of true-north oriented vector components. - Units are same as inputs. 
+ Returns + ------- + (cube, cube) + Tuple of cubes of true-north oriented vector components. + Units are same as inputs. - .. Note:: + Notes + ----- + Vector magnitudes will always be the same as the inputs. - Vector magnitudes will always be the same as the inputs. + This function does not maintain laziness when called; it realises data. + See more at :doc:`/userguide/real_and_lazy_data`. """ u_out, v_out = (cube.copy() for cube in (u_cube, v_cube)) @@ -434,16 +439,16 @@ def rotate_grid_vectors(u_cube, v_cube, grid_angles_cube=None, grid_angles_kwargs = grid_angles_kwargs or {} grid_angles_cube = gridcell_angles(u_cube, **grid_angles_kwargs) gridangles = grid_angles_cube.copy() - gridangles.convert_units('radians') + gridangles.convert_units("radians") uu, vv, aa = (cube.data for cube in (u_out, v_out, gridangles)) - mags = np.sqrt(uu*uu + vv*vv) + mags = np.sqrt(uu * uu + vv * vv) angs = np.arctan2(vv, uu) + aa uu, vv = mags * np.cos(angs), mags * np.sin(angs) # Promote all to masked arrays, and also apply mask at bad (NaN) angles. mask = np.isnan(aa) for cube in (u_out, v_out, aa): - if hasattr(cube.data, 'mask'): + if hasattr(cube.data, "mask"): mask |= cube.data.mask u_out.data = np.ma.masked_array(uu, mask=mask) v_out.data = np.ma.masked_array(vv, mask=mask) diff --git a/lib/iris/analysis/_interpolation.py b/lib/iris/analysis/_interpolation.py index fb6c3ce8d0..6904c5ae4f 100644 --- a/lib/iris/analysis/_interpolation.py +++ b/lib/iris/analysis/_interpolation.py @@ -1,58 +1,39 @@ -# (C) British Crown Copyright 2014 - 2019, Met Office +# Copyright Iris contributors # -# This file is part of Iris. -# -# Iris is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the -# Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Iris is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with Iris. If not, see . +# This file is part of Iris and is released under the BSD license. +# See LICENSE in the root of the repository for full licensing details. 
"""A collection of helpers for interpolation.""" -from __future__ import (absolute_import, division, print_function) -from six.moves import (filter, input, map, range, zip) # noqa - from collections import namedtuple from itertools import product import operator -from numpy.lib.stride_tricks import as_strided import numpy as np +from numpy.lib.stride_tricks import as_strided import numpy.ma as ma -from iris.analysis._scipy_interpolate import _RegularGridInterpolator -from iris.analysis.cartography import wrap_lons as wrap_circular_points -from iris.coords import DimCoord, AuxCoord +from iris.coords import AuxCoord, DimCoord import iris.util - _DEFAULT_DTYPE = np.float16 -ExtrapolationMode = namedtuple('ExtrapolationMode', ['bounds_error', - 'fill_value', - 'mask_fill_value', - 'force_mask']) +ExtrapolationMode = namedtuple( + "ExtrapolationMode", + ["bounds_error", "fill_value", "mask_fill_value", "force_mask"], +) EXTRAPOLATION_MODES = { - 'extrapolate': ExtrapolationMode(False, None, None, False), - 'error': ExtrapolationMode(True, 0, 0, False), - 'nan': ExtrapolationMode(False, np.nan, 0, False), - 'mask': ExtrapolationMode(False, np.nan, 1, True), - 'nanmask': ExtrapolationMode(False, np.nan, 1, False) + "extrapolate": ExtrapolationMode(False, None, None, False), + "error": ExtrapolationMode(True, 0, 0, False), + "nan": ExtrapolationMode(False, np.nan, 0, False), + "mask": ExtrapolationMode(False, np.nan, 1, True), + "nanmask": ExtrapolationMode(False, np.nan, 1, False), } def _canonical_sample_points(coords, sample_points): - """ - Return the canonical form of the points values. + """Return the canonical form of the points values. Ensures that any points supplied as datetime objects, or similar, are converted to their numeric form. @@ -61,12 +42,14 @@ def _canonical_sample_points(coords, sample_points): canonical_sample_points = [] for coord, points in zip(coords, sample_points): if coord.units.is_time_reference(): + def convert_date(date): try: date = coord.units.date2num(date) except AttributeError: pass return date + convert_dates = np.vectorize(convert_date, [np.dtype(float)]) points = convert_dates(points) canonical_sample_points.append(points) @@ -74,19 +57,19 @@ def convert_date(date): def extend_circular_coord(coord, points): - """ - Return coordinates points with a shape extended by one + """Return coordinate points with a shape extended by one. + This is common when dealing with circular coordinates. """ - modulus = np.array(coord.units.modulus or 0, - dtype=coord.dtype) + modulus = np.array(coord.units.modulus or 0, dtype=coord.dtype) points = np.append(points, points[0] + modulus) return points def extend_circular_coord_and_data(coord, data, coord_dim): - """ + """Return coordinate points and data with a shape extended by one in the provided axis. + Return coordinate points and a data array with a shape extended by one in the coord_dim axis. This is common when dealing with circular coordinates. @@ -102,27 +85,26 @@ def extend_circular_data(data, coord_dim): coord_slice_in_cube[coord_dim] = slice(0, 1) mod = ma if ma.isMaskedArray(data) else np - data = mod.concatenate((data, - data[tuple(coord_slice_in_cube)]), - axis=coord_dim) + data = mod.concatenate((data, data[tuple(coord_slice_in_cube)]), axis=coord_dim) return data def get_xy_dim_coords(cube): - """ - Return the x and y dimension coordinates from a cube. + """Return the x and y dimension coordinates from a cube. 
This function raises a ValueError if the cube does not contain one and only one set of x and y dimension coordinates. It also raises a ValueError if the identified x and y coordinates do not have coordinate systems that are equal. - Args: - - * cube: + Parameters + ---------- + cube : :class:`iris.cube.Cube` An instance of :class:`iris.cube.Cube`. - Returns: + Returns + ------- + tuple A tuple containing the cube's x and y dimension coordinates. """ @@ -130,99 +112,98 @@ def get_xy_dim_coords(cube): def get_xy_coords(cube, dim_coords=False): - """ - Return the x and y coordinates from a cube. + """Return the x and y coordinates from a cube. This function raises a ValueError if the cube does not contain one and only one set of x and y coordinates. It also raises a ValueError if the identified x and y coordinates do not have coordinate systems that are equal. - Args: - - * cube: + Parameters + ---------- + cube : :class:`iris.cube.Cube` An instance of :class:`iris.cube.Cube`. - - Kwargs: - - * dim_coords: + dim_coords : bool, default=False Set this to True to only return dimension coordinates. Defaults to False. - Returns: + Returns + ------- + tuple A tuple containing the cube's x and y dimension coordinates. """ - x_coords = cube.coords(axis='x', dim_coords=dim_coords) + x_coords = cube.coords(axis="x", dim_coords=dim_coords) if len(x_coords) != 1 or x_coords[0].ndim != 1: - raise ValueError('Cube {!r} must contain a single 1D x ' - 'coordinate.'.format(cube.name())) + raise ValueError( + "Cube {!r} must contain a single 1D x coordinate.".format(cube.name()) + ) x_coord = x_coords[0] - y_coords = cube.coords(axis='y', dim_coords=dim_coords) + y_coords = cube.coords(axis="y", dim_coords=dim_coords) if len(y_coords) != 1 or y_coords[0].ndim != 1: - raise ValueError('Cube {!r} must contain a single 1D y ' - 'coordinate.'.format(cube.name())) + raise ValueError( + "Cube {!r} must contain a single 1D y coordinate.".format(cube.name()) + ) y_coord = y_coords[0] if x_coord.coord_system != y_coord.coord_system: - raise ValueError("The cube's x ({!r}) and y ({!r}) " - "coordinates must have the same coordinate " - "system.".format(x_coord.name(), y_coord.name())) + raise ValueError( + "The cube's x ({!r}) and y ({!r}) " + "coordinates must have the same coordinate " + "system.".format(x_coord.name(), y_coord.name()) + ) return x_coord, y_coord def snapshot_grid(cube): - """ - Helper function that returns deep copies of lateral (dimension) coordinates - from a cube. - - """ + """Return deep copies of lateral (dimension) coordinates from a cube.""" x, y = get_xy_dim_coords(cube) return x.copy(), y.copy() -class RectilinearInterpolator(object): - """ +class RectilinearInterpolator: + """Provide support for performing nearest-neighbour or linear interpolation. + This class provides support for performing nearest-neighbour or linear interpolation over one or more orthogonal dimensions. """ - def __init__(self, src_cube, coords, method, extrapolation_mode): - """ - Perform interpolation over one or more orthogonal coordinates. - Args: + def __init__(self, src_cube, coords, method, extrapolation_mode): + """Perform interpolation over one or more orthogonal coordinates. - * src_cube: + Parameters + ---------- + src_cube : :class:`iris.cube.Cube` The :class:`iris.cube.Cube` which is to be interpolated. - * coords: + coords : The names or coordinate instances which are to be - interpolated over - * method: + interpolated over. + method : Either 'linear' or 'nearest'. 
- * extrapolation_mode: + extrapolation_mode : str Must be one of the following strings: - * 'extrapolate' - The extrapolation points will be calculated - according to the method. The 'linear' method extends the - gradient of the closest two points. The 'nearest' method - uses the value of the closest point. - * 'nan' - The extrapolation points will be be set to NaN. - * 'error' - A ValueError exception will be raised, notifying an - attempt to extrapolate. - * 'mask' - The extrapolation points will always be masked, even - if the source data is not a MaskedArray. - * 'nanmask' - If the source data is a MaskedArray the - extrapolation points will be masked. Otherwise they will be - set to NaN. + * 'extrapolate' - The extrapolation points will be calculated + according to the method. The 'linear' method extends the + gradient of the closest two points. The 'nearest' method + uses the value of the closest point. + * 'nan' - The extrapolation points will be set to NaN. + * 'error' - A ValueError exception will be raised, notifying an + attempt to extrapolate. + * 'mask' - The extrapolation points will always be masked, even + if the source data is not a MaskedArray. + * 'nanmask' - If the source data is a MaskedArray the + extrapolation points will be masked. Otherwise they will be + set to NaN. """ # Trigger any deferred loading of the source cube's data and snapshot # its state to ensure that the interpolator is impervious to external # changes to the original source cube. The data is loaded to prevent - # the snaphot having lazy data, avoiding the potential for the + # the snapshot having lazy data, avoiding the potential for the # same data to be loaded again and again. if src_cube.has_lazy_data(): src_cube.data @@ -230,13 +211,13 @@ def __init__(self, src_cube, coords, method, extrapolation_mode): # Coordinates defining the dimensions to be interpolated. self._src_coords = [self._src_cube.coord(coord) for coord in coords] # Whether to use linear or nearest-neighbour interpolation. - if method not in ('linear', 'nearest'): - msg = 'Interpolation method {!r} not supported'.format(method) + if method not in ("linear", "nearest"): + msg = "Interpolation method {!r} not supported".format(method) raise ValueError(msg) self._method = method # The extrapolation mode. if extrapolation_mode not in EXTRAPOLATION_MODES: - msg = 'Extrapolation mode {!r} not supported.' + msg = "Extrapolation mode {!r} not supported." raise ValueError(msg.format(extrapolation_mode)) self._mode = extrapolation_mode # The point values defining the dimensions to be interpolated. @@ -270,18 +251,15 @@ def extrapolation_mode(self): return self._mode def _account_for_circular(self, points, data): - """ - Extend the given data array, and re-centralise coordinate points - for circular (1D) coordinates. + """Extend data array, and re-centralise coordinate points for circular (1D) coordinates.""" + from iris.analysis.cartography import wrap_lons - """ - for (circular, modulus, index, dim, offset) in self._circulars: + for circular, modulus, index, dim, offset in self._circulars: if modulus: # Map all the requested values into the range of the source # data (centred over the centre of the source data to allow # extrapolation where required). - points[:, index] = wrap_circular_points(points[:, index], - offset, modulus) + points[:, index] = wrap_lons(points[:, index], offset, modulus) # Also extend data if circular (to match the coord points, which # 'setup' already extended).
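For context, the modes tabled in EXTRAPOLATION_MODES surface through Iris's public interpolation schemes. A minimal sketch of exercising the 'mask' mode from user code, assuming a sample cube with a 'longitude' coordinate (the file name and sample values below are illustrative assumptions, not part of this patch):

    import iris
    from iris.analysis import Linear

    # Hypothetical input; any cube with a 'longitude' coordinate would do.
    cube = iris.load_cube(iris.sample_data_path("air_temp.pp"))

    # 'mask' forces out-of-range sample points (370.0 here) to be masked in
    # the result, even when the source data is not a MaskedArray.
    scheme = Linear(extrapolation_mode="mask")
    result = cube.interpolate([("longitude", [0.0, 370.0])], scheme)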
@@ -293,25 +271,24 @@ def _account_for_circular(self, points, data): def _account_for_inverted(self, data): if np.any(self._coord_decreasing): dim_slices = [slice(None)] * data.ndim - for interp_dim, flip in zip(self._interp_dims, - self._coord_decreasing): + for interp_dim, flip in zip(self._interp_dims, self._coord_decreasing): if flip: dim_slices[interp_dim] = slice(-1, None, -1) data = data[tuple(dim_slices)] return data def _interpolate(self, data, interp_points): - """ - Interpolate a data array over N dimensions. + """Interpolate a data array over N dimensions. Create and cache the underlying interpolator instance before invoking it to perform interpolation over the data at the given coordinate point values. - * data (ndarray): + Parameters + ---------- + data : ndarray A data array, to be interpolated in its first 'N' dimensions. - - * interp_points (ndarray): + interp_points : ndarray An array of interpolation coordinate values. Its shape is (..., N) where N is the number of interpolation dimensions. @@ -320,15 +297,18 @@ def _interpolate(self, data, interp_points): The other (leading) dimensions index over the different required sample points. - Returns: - - A :class:`np.ndarray`. Its shape is "points_shape + extra_shape", + Returns + ------- + :class:`np.ndarray`. + Its shape is "points_shape + extra_shape", where "extra_shape" is the remaining non-interpolated dimensions of the data array (i.e. 'data.shape[N:]'), and "points_shape" is the leading dimensions of interp_points, (i.e. 'interp_points.shape[:-1]'). """ + from iris.analysis._scipy_interpolate import _RegularGridInterpolator + dtype = self._interpolated_dtype(data.dtype) if data.dtype != dtype: # Perform dtype promotion. @@ -341,8 +321,12 @@ def _interpolate(self, data, interp_points): # some unnecessary checks on the fill_value parameter, # so we set it afterwards instead. Sneaky. ;-) self._interpolator = _RegularGridInterpolator( - self._src_points, data, method=self.method, - bounds_error=mode.bounds_error, fill_value=None) + self._src_points, + data, + method=self.method, + bounds_error=mode.bounds_error, + fill_value=None, + ) else: self._interpolator.values = data @@ -365,17 +349,14 @@ def _interpolate(self, data, interp_points): self._interpolator.fill_value = mode.mask_fill_value self._interpolator.values = src_mask mask_fraction = self._interpolator(interp_points) - new_mask = (mask_fraction > 0) + new_mask = mask_fraction > 0 if ma.isMaskedArray(data) or np.any(new_mask): result = np.ma.MaskedArray(result, new_mask) return result def _resample_coord(self, sample_points, coord, coord_dims): - """ - Interpolate the given coordinate at the provided sample points. - - """ + """Interpolate the given coordinate at the provided sample points.""" # NB. This section is ripe for improvement: # - Internally self._points() expands coord.points to the same # N-dimensional shape as the cube's data, but it doesn't @@ -384,8 +365,10 @@ def _resample_coord(self, sample_points, coord, coord_dims): # - By expanding to N dimensions self._points() is doing # unnecessary work. data = self._points(sample_points, coord.points, coord_dims) - index = tuple(0 if dim not in coord_dims else slice(None) - for dim in range(self._src_cube.ndim)) + index = tuple( + 0 if dim not in coord_dims else slice(None) + for dim in range(self._src_cube.ndim) + ) new_points = data[index] # Watch out for DimCoord instances that are no longer monotonic # after the resampling. 
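The masked-data handling in _interpolate above can be pictured in isolation: masked source points are zeroed, the mask itself is interpolated as floats with the same scheme, and any target point receiving a non-zero contribution from a masked source point is masked in the result. A standalone sketch of that idea using SciPy's public interpolator (the arrays are invented for illustration):

    import numpy as np
    import numpy.ma as ma
    from scipy.interpolate import RegularGridInterpolator

    x = np.arange(5.0)
    data = ma.masked_invalid([0.0, 1.0, np.nan, 3.0, 4.0])

    # Interpolate the zero-filled data and the mask (as floats) identically.
    interp_data = RegularGridInterpolator((x,), data.filled(0.0), method="linear")
    interp_mask = RegularGridInterpolator((x,), data.mask.astype(float), method="linear")

    sample = np.array([[0.5], [1.5], [3.5]])
    result = ma.MaskedArray(interp_data(sample), interp_mask(sample) > 0)
    # The point at 1.5 draws on the masked source value, so it comes out masked.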
@@ -397,7 +380,8 @@ def _resample_coord(self, sample_points, coord, coord_dims): return new_coord def _setup(self): - """ + """Perform initial start-up configuration and validation. + Perform initial start-up configuration and validation based on the cube and the specified coordinates to be interpolated over. @@ -416,97 +400,97 @@ def _setup(self): # Force all coordinates to be monotonically increasing. # Generally this isn't always necessary for a rectilinear # interpolator, but it is a common requirement.) - decreasing = (coord.ndim == 1 and - # NOTE: this clause avoids an error when > 1D, - # as '_validate' raises a more readable error. - coord_points.size > 1 and - coord_points[1] < coord_points[0]) + decreasing = ( + coord.ndim == 1 + and + # NOTE: this clause avoids an error when > 1D, + # as '_validate' raises a more readable error. + coord_points.size > 1 + and coord_points[1] < coord_points[0] + ) self._coord_decreasing.append(decreasing) if decreasing: coord_points = coord_points[::-1] # Record info if coord is circular, and adjust points. - circular = getattr(coord, 'circular', False) - modulus = getattr(coord.units, 'modulus', 0) + circular = getattr(coord, "circular", False) + modulus = getattr(coord.units, "modulus", 0) if circular or modulus: # Only DimCoords can be circular. if circular: coord_points = extend_circular_coord(coord, coord_points) - offset = 0.5 * (coord_points.max() + coord_points.min() - - modulus) - self._circulars.append((circular, modulus, - index, coord_dims[0], - offset)) + offset = 0.5 * (coord_points.max() + coord_points.min() - modulus) + self._circulars.append( + (circular, modulus, index, coord_dims[0], offset) + ) self._src_points.append(coord_points) # Record any interpolation cube dims we haven't already seen. - coord_dims = [c for c in coord_dims - if c not in self._interp_dims] + coord_dims = [c for c in coord_dims if c not in self._interp_dims] self._interp_dims += coord_dims self._validate() def _validate(self): - """ + """Perform checks to ensure interpolation request is valid. + Perform all sanity checks to ensure that the interpolation request over the cube with the specified coordinates is valid and can be performed. """ if len(set(self._interp_dims)) != len(self._src_coords): - raise ValueError('Coordinates repeat a data dimension - the ' - 'interpolation would be over-specified.') + raise ValueError( + "Coordinates repeat a data dimension - the " + "interpolation would be over-specified." + ) for coord in self._src_coords: if coord.ndim != 1: - raise ValueError('Interpolation coords must be 1-d for ' - 'rectilinear interpolation.') + raise ValueError( + "Interpolation coords must be 1-d for rectilinear interpolation." + ) if not isinstance(coord, DimCoord): # Check monotonic. if not iris.util.monotonic(coord.points, strict=True): - msg = 'Cannot interpolate over the non-' \ - 'monotonic coordinate {}.' + msg = "Cannot interpolate over the non-monotonic coordinate {}." raise ValueError(msg.format(coord.name())) def _interpolated_dtype(self, dtype): - """ - Determine the minimum base dtype required by the - underlying interpolator. - - """ - if self._method == 'nearest': + """Determine the minimum base dtype required by the underlying interpolator.""" + if self._method == "nearest": result = dtype else: result = np.result_type(_DEFAULT_DTYPE, dtype) return result def _points(self, sample_points, data, data_dims=None): - """ + """Interpolate at the specified points. 
+ Interpolate the given data values at the specified list of orthogonal (coord, points) pairs. - Args: - - * sample_points: + Parameters + ---------- + sample_points : A list of N iterables, where N is the number of coordinates passed to the constructor. - [sample_values_for_coord_0, sample_values_for_coord_1, ...] - * data: + [sample_values_for_coord_0, sample_values_for_coord_1, ...]. + data : The data to interpolate - not necessarily the data from the cube that was used to construct this interpolator. If the data has fewer dimensions, then data_dims must be defined. - - Kwargs: - - * data_dims: + data_dims : optional The dimensions of the given data array in terms of the original cube passed through to this interpolator's constructor. If None, the data dimensions must map one-to-one onto the increasing dimension order of the cube. - Returns: + Returns + ------- + :class:`~numpy.ndarray` or :class:`~numpy.ma.MaskedArray` An :class:`~numpy.ndarray` or :class:`~numpy.ma.MaskedArray` instance of the interpolated data. @@ -515,13 +499,15 @@ def _points(self, sample_points, data, data_dims=None): data_dims = data_dims or dims if len(data_dims) != data.ndim: - msg = 'Data being interpolated is not consistent with ' \ - 'the data passed through.' + msg = ( + "Data being interpolated is not consistent with " + "the data passed through." + ) raise ValueError(msg) if sorted(data_dims) != list(data_dims): # To do this, a pre & post transpose will be necessary. - msg = 'Currently only increasing data_dims is supported.' + msg = "Currently only increasing data_dims is supported." raise NotImplementedError(msg) # Broadcast the data into the shape of the original cube. @@ -530,8 +516,7 @@ def _points(self, sample_points, data, data_dims=None): for dim in range(self._src_cube.ndim): if dim not in data_dims: strides.insert(dim, 0) - data = as_strided(data, strides=strides, - shape=self._src_cube.shape) + data = as_strided(data, strides=strides, shape=self._src_cube.shape) data = self._account_for_inverted(data) # Calculate the transpose order to shuffle the interpolated dimensions @@ -541,8 +526,7 @@ def _points(self, sample_points, data, data_dims=None): di = self._interp_dims ds = sorted(dims, key=lambda d: d not in di) dmap = {d: di.index(d) if d in di else ds.index(d) for d in dims} - interp_order, _ = zip(*sorted(dmap.items(), - key=operator.itemgetter(1))) + interp_order, _ = zip(*sorted(dmap.items(), key=operator.itemgetter(1))) _, src_order = zip(*sorted(dmap.items(), key=operator.itemgetter(0))) # Prepare the sample points for interpolation and calculate the @@ -555,8 +539,9 @@ def _points(self, sample_points, data, data_dims=None): interp_points.append(points) interp_shape.append(points.size) - interp_shape.extend(length for dim, length in enumerate(data.shape) if - dim not in di) + interp_shape.extend( + length for dim, length in enumerate(data.shape) if dim not in di + ) # Convert the interpolation points into a cross-product array # with shape (n_cross_points, n_dims) @@ -581,35 +566,31 @@ def _points(self, sample_points, data, data_dims=None): return result def __call__(self, sample_points, collapse_scalar=True): - """ - Construct a cube from the specified orthogonal interpolation points. + """Construct a cube from the specified orthogonal interpolation points. - Args: - - * sample_points: + Parameters + ---------- + sample_points : A list of N iterables, where N is the number of coordinates passed to the constructor. - [sample_values_for_coord_0, sample_values_for_coord_1, ...] 
- - Kwargs: - - * collapse_scalar: + [sample_values_for_coord_0, sample_values_for_coord_1, ...]. + collapse_scalar : bool, default=True Whether to collapse the dimension of the scalar sample points in the resulting cube. Default is True. - Returns: + Returns + ------- + :class:`iris.cube.Cube` A cube interpolated at the given sample points. The dimensionality of the cube will be the number of original cube dimensions minus the number of scalar coordinates, if collapse_scalar is True. """ if len(sample_points) != len(self._src_coords): - msg = 'Expected sample points for {} coordinates, got {}.' - raise ValueError(msg.format(len(self._src_coords), - len(sample_points))) + msg = "Expected sample points for {} coordinates, got {}." + raise ValueError(msg.format(len(self._src_coords), len(sample_points))) - sample_points = _canonical_sample_points(self._src_coords, - sample_points) + sample_points = _canonical_sample_points(self._src_coords, sample_points) data = self._src_cube.data # Interpolate the cube payload. @@ -654,15 +635,17 @@ def construct_new_coord(coord): else: if set(dims).intersection(set(self._interp_dims)): # Interpolate the coordinate payload. - new_coord = self._resample_coord(sample_points, coord, - dims) + new_coord = self._resample_coord(sample_points, coord, dims) else: new_coord = coord.copy() return new_coord, dims def gen_new_cube(): - if (isinstance(new_coord, DimCoord) and len(dims) > 0 and - dims[0] not in dims_with_dim_coords): + if ( + isinstance(new_coord, DimCoord) + and len(dims) > 0 + and dims[0] not in dims_with_dim_coords + ): new_cube._add_unique_dim_coord(new_coord, dims) dims_with_dim_coords.append(dims[0]) else: @@ -670,7 +653,7 @@ def gen_new_cube(): coord_mapping[id(coord)] = new_coord # Copy/interpolate the coordinates. - for coord in (cube.dim_coords + cube.aux_coords): + for coord in cube.dim_coords + cube.aux_coords: new_coord, dims = construct_new_coord(coord) gen_new_cube() @@ -678,8 +661,10 @@ def gen_new_cube(): new_cube.add_aux_factory(factory.updated(coord_mapping)) if collapse_scalar and _new_scalar_dims: - dim_slices = [0 if dim in _new_scalar_dims else slice(None) - for dim in range(new_cube.ndim)] + dim_slices = [ + 0 if dim in _new_scalar_dims else slice(None) + for dim in range(new_cube.ndim) + ] new_cube = new_cube[tuple(dim_slices)] return new_cube diff --git a/lib/iris/analysis/_regrid.py b/lib/iris/analysis/_regrid.py index f80d797642..431871de2c 100644 --- a/lib/iris/analysis/_regrid.py +++ b/lib/iris/analysis/_regrid.py @@ -1,23 +1,7 @@ -# (C) British Crown Copyright 2014 - 2019, Met Office +# Copyright Iris contributors # -# This file is part of Iris. -# -# Iris is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the -# Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Iris is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with Iris. If not, see . - -from __future__ import (absolute_import, division, print_function) -from six.moves import (filter, input, map, range, zip) # noqa -import six +# This file is part of Iris and is released under the BSD license. 
+# See LICENSE in the root of the repository for full licensing details. import copy import functools @@ -25,106 +9,116 @@ import numpy as np import numpy.ma as ma - -from iris.analysis._interpolation import (EXTRAPOLATION_MODES, - extend_circular_coord_and_data, - get_xy_dim_coords, snapshot_grid) +from scipy.sparse import csc_matrix + +from iris._lazy_data import map_complete_blocks +from iris.analysis._interpolation import ( + EXTRAPOLATION_MODES, + extend_circular_coord_and_data, + get_xy_dim_coords, + snapshot_grid, +) from iris.analysis._scipy_interpolate import _RegularGridInterpolator -import iris.cube -from iris.util import _meshgrid - -from scipy.sparse import csc_matrix, diags as sparse_diags +from iris.util import _meshgrid, guess_coord_axis +from iris.warnings import IrisImpossibleUpdateWarning def _transform_xy_arrays(crs_from, x, y, crs_to): - """ - Transform 2d points between cartopy coordinate reference systems. + """Transform 2d points between cartopy coordinate reference systems. NOTE: copied private function from iris.analysis.cartography. - Args: - - * crs_from, crs_to (:class:`cartopy.crs.Projection`): + Parameters + ---------- + crs_from : :class:`cartopy.crs.Projection` + The coordinate reference systems. + x, y : arrays + Point locations defined in 'crs_from'. + crs_to : :class:`cartopy.crs.Projection` The coordinate reference systems. - * x, y (arrays): - point locations defined in 'crs_from'. - Returns: - x, y : Arrays of locations defined in 'crs_to'. + Returns + ------- + (array, array) + Arrays of locations defined in 'crs_to' of (x, y). """ pts = crs_to.transform_points(crs_from, x, y) return pts[..., 0], pts[..., 1] -def _regrid_weighted_curvilinear_to_rectilinear__prepare( - src_cube, weights, grid_cube): - """ - First (setup) part of 'regrid_weighted_curvilinear_to_rectilinear'. +def _regrid_weighted_curvilinear_to_rectilinear__prepare(src_cube, weights, grid_cube): + """First (setup) part of 'regrid_weighted_curvilinear_to_rectilinear'. Check inputs and calculate the sparse regrid matrix and related info. - The 'regrid info' returned can be re-used over many 2d slices. + The 'regrid info' returned can be re-used over many cubes. """ - if src_cube.aux_factories: - msg = 'All source cube derived coordinates will be ignored.' - warnings.warn(msg) - # Get the source cube x and y 2D auxiliary coordinates. - sx, sy = src_cube.coord(axis='x'), src_cube.coord(axis='y') + sx, sy = src_cube.coord(axis="x"), src_cube.coord(axis="y") # Get the target grid cube x and y dimension coordinates. tx, ty = get_xy_dim_coords(grid_cube) + sl = [0] * grid_cube.ndim + sl[grid_cube.coord_dims(tx)[0]] = np.s_[:] + sl[grid_cube.coord_dims(ty)[0]] = np.s_[:] + grid_cube = grid_cube[tuple(sl)] + if sx.units != sy.units: - msg = 'The source cube x ({!r}) and y ({!r}) coordinates must ' \ - 'have the same units.' + msg = ( + "The source cube x ({!r}) and y ({!r}) coordinates must " + "have the same units." + ) raise ValueError(msg.format(sx.name(), sy.name())) if src_cube.coord_dims(sx) != src_cube.coord_dims(sy): - msg = 'The source cube x ({!r}) and y ({!r}) coordinates must ' \ - 'map onto the same cube dimensions.' + msg = ( + "The source cube x ({!r}) and y ({!r}) coordinates must " + "map onto the same cube dimensions." + ) raise ValueError(msg.format(sx.name(), sy.name())) if sx.coord_system != sy.coord_system: - msg = 'The source cube x ({!r}) and y ({!r}) coordinates must ' \ - 'have the same coordinate system.' 
+ msg = ( + "The source cube x ({!r}) and y ({!r}) coordinates must " + "have the same coordinate system." + ) raise ValueError(msg.format(sx.name(), sy.name())) if sx.coord_system is None: - msg = ('The source X and Y coordinates must have a defined ' - 'coordinate system.') + msg = "The source X and Y coordinates must have a defined coordinate system." raise ValueError(msg) if tx.units != ty.units: - msg = 'The target grid cube x ({!r}) and y ({!r}) coordinates must ' \ - 'have the same units.' + msg = ( + "The target grid cube x ({!r}) and y ({!r}) coordinates must " + "have the same units." + ) raise ValueError(msg.format(tx.name(), ty.name())) if tx.coord_system is None: - msg = ('The target X and Y coordinates must have a defined ' - 'coordinate system.') + msg = "The target X and Y coordinates must have a defined coordinate system." raise ValueError(msg) if tx.coord_system != ty.coord_system: - msg = 'The target grid cube x ({!r}) and y ({!r}) coordinates must ' \ - 'have the same coordinate system.' + msg = ( + "The target grid cube x ({!r}) and y ({!r}) coordinates must " + "have the same coordinate system." + ) raise ValueError(msg.format(tx.name(), ty.name())) if weights is None: weights = np.ones(sx.shape) if weights.shape != sx.shape: - msg = ('Provided weights must have the same shape as the X and Y ' - 'coordinates.') + msg = "Provided weights must have the same shape as the X and Y coordinates." raise ValueError(msg) if not tx.has_bounds() or not tx.is_contiguous(): - msg = 'The target grid cube x ({!r})coordinate requires ' \ - 'contiguous bounds.' + msg = "The target grid cube x ({!r}) coordinate requires contiguous bounds." raise ValueError(msg.format(tx.name())) if not ty.has_bounds() or not ty.is_contiguous(): - msg = 'The target grid cube y ({!r}) coordinate requires ' \ - 'contiguous bounds.' + msg = "The target grid cube y ({!r}) coordinate requires contiguous bounds." raise ValueError(msg.format(ty.name())) def _src_align_and_flatten(coord): @@ -138,8 +132,10 @@ def _src_align_and_flatten(coord): if src_cube.coord_dims(coord) == (1, 0): points = points.T if points.shape != src_cube.shape: - msg = 'The shape of the points array of {!r} is not compatible ' \ - 'with the shape of {!r}.' + msg = ( + "The shape of the points array of {!r} is not compatible " + "with the shape of {!r}." + ) raise ValueError(msg.format(coord.name(), src_cube.name())) return np.asarray(points.flatten()) @@ -152,7 +148,8 @@ def _src_align_and_flatten(coord): src_crs = sx.coord_system.as_cartopy_projection() tgt_crs = tx.coord_system.as_cartopy_projection() sx_points, sy_points = _transform_xy_arrays( src_crs, sx_points, sy_points, tgt_crs + ) # # TODO: how does this work with scaled units ?? # e.g. if crs is latlon, units could be degrees OR radians ? @@ -168,24 +165,22 @@ def _src_align_and_flatten(coord): indices = np.where(sx_points < 0) # Ensure += doesn't raise a TypeError if not np.can_cast(modulus, sx_points.dtype): - sx_points = sx_points.astype(type(modulus), casting='safe') + sx_points = sx_points.astype(type(modulus), casting="safe") sx_points[indices] += modulus elif min_sx >= 0 and min_tx < 0: indices = np.where(sx_points > (modulus / 2)) # Ensure -= doesn't raise a TypeError if not np.can_cast(modulus, sx_points.dtype): - sx_points = sx_points.astype(type(modulus), casting='safe') + sx_points = sx_points.astype(type(modulus), casting="safe") sx_points[indices] -= modulus # Create target grid cube x and y cell boundaries.
tx_depth, ty_depth = tx.points.size, ty.points.size - tx_dim, = grid_cube.coord_dims(tx) - ty_dim, = grid_cube.coord_dims(ty) + (tx_dim,) = grid_cube.coord_dims(tx) + (ty_dim,) = grid_cube.coord_dims(ty) - tx_cells = np.concatenate((tx.bounds[:, 0], - tx.bounds[-1, 1].reshape(1))) - ty_cells = np.concatenate((ty.bounds[:, 0], - ty.bounds[-1, 1].reshape(1))) + tx_cells = np.concatenate((tx.bounds[:, 0], tx.bounds[-1, 1].reshape(1))) + ty_cells = np.concatenate((ty.bounds[:, 0], ty.bounds[-1, 1].reshape(1))) # Determine the target grid cube x and y cells that bound # the source cube x and y points. @@ -196,22 +191,24 @@ def _regrid_indices(cells, depth, points): if extent == 0: # Detected an dimension coordinate with an invalid # zero length cell extent. - msg = 'The target grid cube {} ({!r}) coordinate contains ' \ - 'a zero length cell extent.' - axis, name = 'x', tx.name() + msg = ( + "The target grid cube {} ({!r}) coordinate contains " + "a zero length cell extent." + ) + axis, name = "x", tx.name() if points is sy_points: - axis, name = 'y', ty.name() + axis, name = "y", ty.name() raise ValueError(msg.format(axis, name)) elif extent > 0: # The cells of the dimension coordinate are in ascending order. - indices = np.searchsorted(cells, points, side='right') - 1 + indices = np.searchsorted(cells, points, side="right") - 1 else: # The cells of the dimension coordinate are in descending order. # np.searchsorted() requires ascending order, so we require to # account for this restriction. cells = cells[::-1] - right = np.searchsorted(cells, points, side='right') - left = np.searchsorted(cells, points, side='left') + right = np.searchsorted(cells, points, side="right") + left = np.searchsorted(cells, points, side="left") indices = depth - right # Only those points that exactly match the left-hand cell bound # will differ between 'left' and 'right'. Thus their appropriate @@ -226,15 +223,19 @@ def _regrid_indices(cells, depth, points): x_indices = _regrid_indices(tx_cells, tx_depth, sx_points) y_indices = _regrid_indices(ty_cells, ty_depth, sy_points) - # Now construct a sparse M x N matix, where M is the flattened target + # Now construct a sparse M x N matrix, where M is the flattened target # space, and N is the flattened source space. The sparse matrix will then # be populated with those source cube points that contribute to a specific # target cube cell. # Determine the valid indices and their offsets in M x N space. # Calculate the valid M offsets. - cols = np.where((y_indices >= 0) & (y_indices < ty_depth) & - (x_indices >= 0) & (x_indices < tx_depth))[0] + cols = np.where( + (y_indices >= 0) + & (y_indices < ty_depth) + & (x_indices >= 0) + & (x_indices < tx_depth) + )[0] # Reduce the indices to only those that are valid. x_indices = x_indices[cols] @@ -251,8 +252,9 @@ def _regrid_indices(cells, depth, points): data = weights_flat[cols] # Build our sparse M x N matrix of weights. - sparse_matrix = csc_matrix((data, (rows, cols)), - shape=(grid_cube.data.size, src_cube.data.size)) + sparse_matrix = csc_matrix( + (data, (rows, cols)), shape=(grid_cube.data.size, src_cube.data.size) + ) # Performing a sparse sum to collapse the matrix to (M, 1). sum_weights = sparse_matrix.sum(axis=1).getA() @@ -270,111 +272,134 @@ def _regrid_indices(cells, depth, points): return regrid_info -def _regrid_weighted_curvilinear_to_rectilinear__perform( - src_cube, regrid_info): - """ - Second (regrid) part of 'regrid_weighted_curvilinear_to_rectilinear'. 
+def _curvilinear_to_rectilinear_regrid_data( + data, + dims, + regrid_info, +): + """Part of 'regrid_weighted_curvilinear_to_rectilinear' which acts on the data. - Perform the prepared regrid calculation on a single 2d cube. + Perform the prepared regrid calculation on an array. """ sparse_matrix, sum_weights, rows, grid_cube = regrid_info + inds = list(range(-len(dims), 0)) + data = np.moveaxis(data, dims, inds) + data_shape = data.shape + grid_size = np.prod([data_shape[ind] for ind in inds]) + # Calculate the numerator of the weighted mean (M, 1). - is_masked = ma.isMaskedArray(src_cube.data) + is_masked = ma.isMaskedArray(data) + sum_weights = None if not is_masked: - data = src_cube.data + data = data else: # Use raw data array - data = src_cube.data.data + r_data = data.data # Check if there are any masked source points to take account of. - is_masked = np.ma.is_masked(src_cube.data) + is_masked = ma.is_masked(data) if is_masked: # Zero any masked source points so they add nothing in output sums. - mask = src_cube.data.mask - data[mask] = 0.0 + mask = data.mask + r_data[mask] = 0.0 # Calculate a new 'sum_weights' to allow for missing source points. # N.B. it is more efficient to use the original once-calculated # sparse matrix, but in this case we can't. # Hopefully, this post-multiplying by the validities is less costly # than repeating the whole sparse calculation. - valid_src_cells = ~mask.flat[:] - src_cell_validity_factors = sparse_diags( - np.array(valid_src_cells, dtype=int), - 0) - valid_weights = sparse_matrix * src_cell_validity_factors - sum_weights = valid_weights.sum(axis=1).getA() - # Work out where output cells are missing all contributions. - # This allows for where 'rows' contains output cells that have no - # data because of missing input points. - zero_sums = sum_weights == 0.0 - # Make sure we can still divide by sum_weights[rows]. - sum_weights[zero_sums] = 1.0 + valid_src_cells = ~mask.reshape(-1, grid_size) + sum_weights = valid_src_cells @ sparse_matrix.T + data = r_data + if sum_weights is None: + sum_weights = np.ones(data_shape).reshape(-1, grid_size) @ sparse_matrix.T + # Work out where output cells are missing all contributions. + # This allows for where 'rows' contains output cells that have no + # data because of missing input points. + zero_sums = sum_weights == 0.0 + # Make sure we can still divide by sum_weights[rows]. + sum_weights[zero_sums] = 1.0 # Calculate sum in each target cell, over contributions from each source # cell. - numerator = sparse_matrix * data.reshape(-1, 1) - - # Create a template for the weighted mean result. - weighted_mean = ma.masked_all(numerator.shape, dtype=numerator.dtype) - - # Calculate final results in all relevant places. - weighted_mean[rows] = numerator[rows] / sum_weights[rows] - if is_masked: - # Ensure masked points where relevant source cells were all missing. - if np.any(zero_sums): - # Make masked if it wasn't. - weighted_mean = np.ma.asarray(weighted_mean) - # Mask where contributing sums were zero. - weighted_mean[zero_sums] = np.ma.masked - - # Construct the final regridded weighted mean cube. 
- tx = grid_cube.coord(axis='x', dim_coords=True) - ty = grid_cube.coord(axis='y', dim_coords=True) - tx_dim, = grid_cube.coord_dims(tx) - ty_dim, = grid_cube.coord_dims(ty) - dim_coords_and_dims = list(zip((ty.copy(), tx.copy()), (ty_dim, tx_dim))) - cube = iris.cube.Cube(weighted_mean.reshape(grid_cube.shape), - dim_coords_and_dims=dim_coords_and_dims) - cube.metadata = copy.deepcopy(src_cube.metadata) - - for coord in src_cube.coords(dimensions=()): - cube.add_aux_coord(coord.copy()) - - return cube - - -class CurvilinearRegridder(object): + numerator = data.reshape(-1, grid_size) @ sparse_matrix.T + + weighted_mean = numerator / sum_weights + # Ensure masked points where relevant source cells were all missing. + weighted_mean = ma.asarray(weighted_mean) + if np.any(zero_sums): + # Mask where contributing sums were zero. + weighted_mean[zero_sums] = ma.masked + + new_data_shape = list(data_shape) + for dim, length in zip(inds, grid_cube.shape): + new_data_shape[dim] = length + if len(dims) == 1: + new_data_shape.append(grid_cube.shape[1]) + dims = (dims[0], dims[0] + 1) + if len(dims) > 2: + new_data_shape = new_data_shape[: 2 - len(dims)] + dims = dims[:2] + + result = weighted_mean.reshape(new_data_shape) + result = np.moveaxis(result, [-2, -1], dims) + return result + + +def _regrid_weighted_curvilinear_to_rectilinear__perform(src_cube, regrid_info): + """Second (regrid) part of 'regrid_weighted_curvilinear_to_rectilinear'. + + Perform the prepared regrid calculation on a single cube. + """ + dims = src_cube.coord_dims( + CurvilinearRegridder._get_horizontal_coord(src_cube, "x") + ) + result_data = _curvilinear_to_rectilinear_regrid_data( + src_cube.data, dims, regrid_info + ) + grid_cube = regrid_info[-1] + tx = grid_cube.coord(axis="x", dim_coords=True) + ty = grid_cube.coord(axis="y", dim_coords=True) + regrid_callback = functools.partial( + _curvilinear_to_rectilinear_regrid_data, regrid_info=regrid_info + ) + result = _create_cube( + result_data, src_cube, dims, (ty.copy(), tx.copy()), 2, regrid_callback + ) + return result + + +class CurvilinearRegridder: + """Provides support for performing point-in-cell regridding. + This class provides support for performing point-in-cell regridding between a curvilinear source grid and a rectilinear target grid. """ - def __init__(self, src_grid_cube, target_grid_cube, weights=None): - """ - Create a regridder for conversions between the source - and target grids. - Args: + def __init__(self, src_grid_cube, target_grid_cube, weights=None): + """Create a regridder for conversions between the source and target grids. - * src_grid_cube: + Parameters + ---------- + src_grid_cube : :class:`~iris.cube.Cube` The :class:`~iris.cube.Cube` providing the source grid. - * tgt_grid_cube: + tgt_grid_cube : :class:`~iris.cube.Cube` The :class:`~iris.cube.Cube` providing the target grid. - - Optional Args: - - * weights: + weights : optional A :class:`numpy.ndarray` instance that defines the weights for the grid cells of the source grid. Must have the same shape as the data of the source grid. If unspecified, equal weighting is assumed. """ + from iris.cube import Cube + # Validity checks. 
- if not isinstance(src_grid_cube, iris.cube.Cube): + if not isinstance(src_grid_cube, Cube): raise TypeError("'src_grid_cube' must be a Cube") - if not isinstance(target_grid_cube, iris.cube.Cube): + if not isinstance(target_grid_cube, Cube): raise TypeError("'target_grid_cube' must be a Cube") # Snapshot the state of the cubes to ensure that the regridder # is impervious to external changes to the original source cubes. @@ -385,123 +410,125 @@ def __init__(self, src_grid_cube, target_grid_cube, weights=None): @staticmethod def _get_horizontal_coord(cube, axis): - """ - Gets the horizontal coordinate on the supplied cube along the - specified axis. - - Args: + """Get the horizontal coordinate on the supplied cube along the specified axis. - * cube: + Parameters + ---------- + cube : :class:`iris.cube.Cube` An instance of :class:`iris.cube.Cube`. - * axis: + axis : Locate coordinates on `cube` along this axis. - Returns: - The horizontal coordinate on the specified axis of the supplied - cube. + Returns + ------- + The horizontal coordinate on the specified axis of the supplied cube. """ coords = cube.coords(axis=axis, dim_coords=False) if len(coords) != 1: - raise ValueError('Cube {!r} must contain a single 1D {} ' - 'coordinate.'.format(cube.name()), axis) + raise ValueError( + "Cube {!r} must contain a single 1D {} coordinate.".format( + cube.name(), axis + ) + ) return coords[0] def __call__(self, src): - """ + """Regrid onto the target grid. + Regrid the supplied :class:`~iris.cube.Cube` on to the target grid of this :class:`_CurvilinearRegridder`. The given cube must be defined with the same grid as the source grid used to create this :class:`_CurvilinearRegridder`. - Args: + If the source cube has lazy data, it will be realized before + regridding and the returned cube will also have realized data. - * src: + Parameters + ---------- + src : :class:`~iris.cube.Cube` A :class:`~iris.cube.Cube` to be regridded. - Returns: + Returns + ------- + :class:`~iris.cube.Cube` A cube defined with the horizontal dimensions of the target and the other dimensions from this cube. The data values of this cube will be converted to values on the new grid using point-in-cell regridding. """ + from iris.cube import Cube + # Validity checks. - if not isinstance(src, iris.cube.Cube): + if not isinstance(src, Cube): raise TypeError("'src' must be a Cube") - gx = self._get_horizontal_coord(self._src_cube, 'x') - gy = self._get_horizontal_coord(self._src_cube, 'y') + gx = self._get_horizontal_coord(self._src_cube, "x") + gy = self._get_horizontal_coord(self._src_cube, "y") src_grid = (gx.copy(), gy.copy()) - sx = self._get_horizontal_coord(src, 'x') - sy = self._get_horizontal_coord(src, 'y') + sx = self._get_horizontal_coord(src, "x") + sy = self._get_horizontal_coord(src, "y") if (sx, sy) != src_grid: - raise ValueError('The given cube is not defined on the same ' - 'source grid as this regridder.') - - # Call the regridder function. - # This includes repeating over any non-XY dimensions, because the - # underlying routine does not support this. - # FOR NOW: we will use cube.slices and merge to achieve this, - # though that is not a terribly efficient method ... - # TODO: create a template result cube and paste data slices into it, - # which would be more efficient. - result_slices = iris.cube.CubeList([]) - for slice_cube in src.slices(sx): - if self._regrid_info is None: - # Calculate the basic regrid info just once. 
- self._regrid_info = \ - _regrid_weighted_curvilinear_to_rectilinear__prepare( - slice_cube, self.weights, self._target_cube) - slice_result = \ - _regrid_weighted_curvilinear_to_rectilinear__perform( - slice_cube, self._regrid_info) - result_slices.append(slice_result) - result = result_slices.merge_cube() + raise ValueError( + "The given cube is not defined on the same " + "source grid as this regridder." + ) + slice_cube = next(src.slices(sx)) + if self._regrid_info is None: + # Calculate the basic regrid info just once. + self._regrid_info = _regrid_weighted_curvilinear_to_rectilinear__prepare( + slice_cube, self.weights, self._target_cube + ) + result = _regrid_weighted_curvilinear_to_rectilinear__perform( + src, self._regrid_info + ) + return result -class RectilinearRegridder(object): - """ +class RectilinearRegridder: + """Provides support for performing nearest-neighbour or linear regridding. + This class provides support for performing nearest-neighbour or linear regridding between source and target grids. """ - def __init__(self, src_grid_cube, tgt_grid_cube, method, - extrapolation_mode): - """ - Create a regridder for conversions between the source - and target grids. - Args: + def __init__(self, src_grid_cube, tgt_grid_cube, method, extrapolation_mode): + """Create a regridder for conversions between the source and target grids. - * src_grid_cube: + Parameters + ---------- + src_grid_cube : :class:`~iris.cube.Cube` The :class:`~iris.cube.Cube` providing the source grid. - * tgt_grid_cube: + tgt_grid_cube : :class:`~iris.cube.Cube` The :class:`~iris.cube.Cube` providing the target grid. - * method: + method : Either 'linear' or 'nearest'. - * extrapolation_mode: + extrapolation_mode : str Must be one of the following strings: - * 'extrapolate' - The extrapolation points will be - calculated by extending the gradient of the closest two - points. - * 'nan' - The extrapolation points will be be set to NaN. - * 'error' - An exception will be raised, notifying an - attempt to extrapolate. - * 'mask' - The extrapolation points will always be masked, even - if the source data is not a MaskedArray. - * 'nanmask' - If the source data is a MaskedArray the - extrapolation points will be masked. Otherwise they will be - set to NaN. + * 'extrapolate' - The extrapolation points will be + calculated by extending the gradient of the closest two + points. + * 'nan' - The extrapolation points will be set to NaN. + * 'error' - An exception will be raised, notifying an + attempt to extrapolate. + * 'mask' - The extrapolation points will always be masked, even + if the source data is not a MaskedArray. + * 'nanmask' - If the source data is a MaskedArray the + extrapolation points will be masked. Otherwise they will be + set to NaN. """ + from iris.cube import Cube + # Validity checks. - if not isinstance(src_grid_cube, iris.cube.Cube): + if not isinstance(src_grid_cube, Cube): raise TypeError("'src_grid_cube' must be a Cube") - if not isinstance(tgt_grid_cube, iris.cube.Cube): + if not isinstance(tgt_grid_cube, Cube): raise TypeError("'tgt_grid_cube' must be a Cube") # Snapshot the state of the cubes to ensure that the regridder # is impervious to external changes to the original source cubes. @@ -511,13 +538,13 @@ def __init__(self, src_grid_cube, tgt_grid_cube, method, for coord in self._tgt_grid: self._check_units(coord) # Whether to use linear or nearest-neighbour interpolation.
- if method not in ('linear', 'nearest'): - msg = 'Regridding method {!r} not supported.'.format(method) + if method not in ("linear", "nearest"): + msg = "Regridding method {!r} not supported.".format(method) raise ValueError(msg) self._method = method # The extrapolation mode. if extrapolation_mode not in EXTRAPOLATION_MODES: - msg = 'Invalid extrapolation mode {!r}' + msg = "Invalid extrapolation mode {!r}" raise ValueError(msg.format(extrapolation_mode)) self._extrapolation_mode = extrapolation_mode @@ -531,24 +558,27 @@ def extrapolation_mode(self): @staticmethod def _sample_grid(src_coord_system, grid_x_coord, grid_y_coord): - """ + """Convert the rectilinear grid to a curvilinear grid. + Convert the rectilinear grid coordinates to a curvilinear grid in the source coordinate system. The `grid_x_coord` and `grid_y_coord` must share a common coordinate system. - Args: - - * src_coord_system: + Parameters + ---------- + src_coord_system : :class:`iris.coord_system.CoordSystem` The :class:`iris.coord_system.CoordSystem` for the grid of the source Cube. - * grid_x_coord: + grid_x_coord : :class:`iris.coords.DimCoord` The :class:`iris.coords.DimCoord` for the X coordinate. - * grid_y_coord: + grid_y_coord : :class:`iris.coords.DimCoord` The :class:`iris.coords.DimCoord` for the Y coordinate. - Returns: + Returns + ------- + tuple A tuple of the X and Y coordinate values as 2-dimensional arrays. @@ -567,62 +597,69 @@ def _sample_grid(src_coord_system, grid_x_coord, grid_y_coord): return sample_grid_x, sample_grid_y @staticmethod - def _regrid(src_data, x_dim, y_dim, - src_x_coord, src_y_coord, - sample_grid_x, sample_grid_y, - method='linear', extrapolation_mode='nanmask'): - """ - Regrid the given data from the src grid to the sample grid. + def _regrid( + src_data, + x_dim, + y_dim, + src_x_coord, + src_y_coord, + sample_grid_x, + sample_grid_y, + method="linear", + extrapolation_mode="nanmask", + ): + """Regrid the given data from the src grid to the sample grid. The result will be a MaskedArray if either/both of: - - the source array is a MaskedArray, - - the extrapolation_mode is 'mask' and the result requires - extrapolation. + + * the source array is a MaskedArray, + * the extrapolation_mode is 'mask' and the result requires + extrapolation. If the result is a MaskedArray the mask for each element will be set if either/both of: - - there is a non-zero contribution from masked items in the input data - - the element requires extrapolation and the extrapolation_mode - dictates a masked value. - Args: + * there is a non-zero contribution from masked items in the input data + * the element requires extrapolation and the extrapolation_mode + dictates a masked value. - * src_data: + Parameters + ---------- + src_data : An N-dimensional NumPy array or MaskedArray. - * x_dim: + x_dim : The X dimension within `src_data`. - * y_dim: + y_dim : The Y dimension within `src_data`. - * src_x_coord: + src_x_coord : :class:`iris.coords.DimCoord` The X :class:`iris.coords.DimCoord`. - * src_y_coord: + src_y_coord : :class:`iris.coords.DimCoord` The Y :class:`iris.coords.DimCoord`. - * sample_grid_x: + sample_grid_x : A 2-dimensional array of sample X values. - * sample_grid_y: + sample_grid_y : A 2-dimensional array of sample Y values. - - Kwargs: - - * method: + method : str, default="linear" Either 'linear' or 'nearest'. The default method is 'linear'. 
- * extrapolation_mode: + extrapolation_mode : str, default="nanmask" Must be one of the following strings: - * 'linear' - The extrapolation points will be calculated by - extending the gradient of the closest two points. - * 'nan' - The extrapolation points will be be set to NaN. - * 'error' - A ValueError exception will be raised, notifying an - attempt to extrapolate. - * 'mask' - The extrapolation points will always be masked, even - if the source data is not a MaskedArray. - * 'nanmask' - If the source data is a MaskedArray the - extrapolation points will be masked. Otherwise they will be - set to NaN. + * 'extrapolate' - The extrapolation points will be calculated by + extending the gradient of the closest two points. + * 'nan' - The extrapolation points will be set to NaN. + * 'error' - A ValueError exception will be raised, notifying an + attempt to extrapolate. + * 'mask' - The extrapolation points will always be masked, even + if the source data is not a MaskedArray. + * 'nanmask' - If the source data is a MaskedArray the + extrapolation points will be masked. Otherwise they will be + set to NaN. The default mode of extrapolation is 'nanmask'. - Returns: + Returns + ------- + NumPy array The regridded data as an N-dimensional NumPy array. The lengths of the X and Y dimensions will now match those of the sample grid. @@ -633,36 +670,55 @@ def _regrid(src_data, x_dim, y_dim, # experimental regrid_area_weighted_rectilinear_src_and_grid # if sample_grid_x.shape != sample_grid_y.shape: - raise ValueError('Inconsistent sample grid shapes.') + raise ValueError("Inconsistent sample grid shapes.") if sample_grid_x.ndim != 2: - raise ValueError('Sample grid must be 2-dimensional.') + raise ValueError("Sample grid must be 2-dimensional.") # Prepare the result data array shape = list(src_data.shape) - assert shape[x_dim] == src_x_coord.shape[0] - assert shape[y_dim] == src_y_coord.shape[0] - - shape[y_dim] = sample_grid_x.shape[0] - shape[x_dim] = sample_grid_x.shape[1] + final_shape = shape.copy() + if x_dim is not None: + assert shape[x_dim] == src_x_coord.shape[0] + shape[x_dim] = sample_grid_x.shape[1] + final_shape[x_dim] = shape[x_dim] + else: + shape.append(1) + x_dim = len(shape) - 1 + src_data = np.expand_dims(src_data, -1) + if y_dim is not None: + assert shape[y_dim] == src_y_coord.shape[0] + shape[y_dim] = sample_grid_x.shape[0] + final_shape[y_dim] = shape[y_dim] + else: + shape.append(1) + y_dim = len(shape) - 1 + src_data = np.expand_dims(src_data, -1) dtype = src_data.dtype - if method == 'linear': + if method == "linear": # If we're given integer values, convert them to the smallest # possible float dtype that can accurately preserve the values. - if dtype.kind == 'i': + if dtype.kind == "i": dtype = np.promote_types(dtype, np.float16) if ma.isMaskedArray(src_data): data = ma.empty(shape, dtype=dtype) - data.mask = np.zeros(data.shape, dtype=np.bool) + data.mask = np.zeros(data.shape, dtype=np.bool_) else: data = np.empty(shape, dtype=dtype) # The interpolation class requires monotonically increasing # coordinates, so flip the coordinate(s) and data if they aren't.
- reverse_x = (src_x_coord.points[0] > src_x_coord.points[1] if - src_x_coord.points.size > 1 else False) - reverse_y = src_y_coord.points[0] > src_y_coord.points[1] + reverse_x = ( + src_x_coord.points[0] > src_x_coord.points[1] + if src_x_coord.points.size > 1 + else False + ) + reverse_y = ( + src_y_coord.points[0] > src_y_coord.points[1] + if src_y_coord.points.size > 1 + else False + ) flip_index = [slice(None)] * src_data.ndim if reverse_x: src_x_coord = src_x_coord[::-1] @@ -673,15 +729,15 @@ def _regrid(src_data, x_dim, y_dim, src_data = src_data[tuple(flip_index)] if src_x_coord.circular: - x_points, src_data = extend_circular_coord_and_data(src_x_coord, - src_data, - x_dim) + x_points, src_data = extend_circular_coord_and_data( + src_x_coord, src_data, x_dim + ) else: x_points = src_x_coord.points # Slice out the first full 2D piece of data for construction of the # interpolator. - index = [0] * src_data.ndim + index = [0] * len(shape) index[x_dim] = index[y_dim] = slice(None) initial_data = src_data[tuple(index)] if y_dim < x_dim: @@ -689,30 +745,34 @@ def _regrid(src_data, x_dim, y_dim, # Construct the interpolator, we will fill in any values out of bounds # manually. - interpolator = _RegularGridInterpolator([x_points, src_y_coord.points], - initial_data, method=method, - bounds_error=False, - fill_value=None) + interpolator = _RegularGridInterpolator( + [x_points, src_y_coord.points], + initial_data, + method=method, + bounds_error=False, + fill_value=None, + ) # The constructor of the _RegularGridInterpolator class does # some unnecessary checks on these values, so we set them # afterwards instead. Sneaky. ;-) try: mode = EXTRAPOLATION_MODES[extrapolation_mode] except KeyError: - raise ValueError('Invalid extrapolation mode.') + raise ValueError("Invalid extrapolation mode.") interpolator.bounds_error = mode.bounds_error interpolator.fill_value = mode.fill_value # Construct the target coordinate points array, suitable for passing to # the interpolator multiple times. - interp_coords = [sample_grid_x.astype(np.float64)[..., np.newaxis], - sample_grid_y.astype(np.float64)[..., np.newaxis]] + interp_coords = [ + sample_grid_x.astype(np.float64)[..., np.newaxis], + sample_grid_y.astype(np.float64)[..., np.newaxis], + ] # Map all the requested values into the range of the source # data (centred over the centre of the source data to allow # extrapolation where required). min_x, max_x = x_points.min(), x_points.max() - min_y, max_y = src_y_coord.points.min(), src_y_coord.points.max() if src_x_coord.units.modulus: modulus = src_x_coord.units.modulus offset = (max_x + min_x - modulus) * 0.5 @@ -752,182 +812,92 @@ def interpolate(data): if ma.isMaskedArray(data) or mode.force_mask: # NB. np.ma.getmaskarray returns an array of `False` if # `src_subset` is not a masked array. - src_mask = np.ma.getmaskarray(src_subset) + src_mask = ma.getmaskarray(src_subset) interpolator.fill_value = mode.mask_fill_value mask_fraction = interpolate(src_mask) - new_mask = (mask_fraction > 0) + new_mask = mask_fraction > 0 - if np.ma.isMaskedArray(data): + if ma.isMaskedArray(data): data.mask[tuple(index)] = new_mask elif np.any(new_mask): # Set mask=False to ensure we have an expanded mask array. 
- data = np.ma.MaskedArray(data, mask=False) + data = ma.MaskedArray(data, mask=False) data.mask[tuple(index)] = new_mask + data = data.reshape(final_shape) return data - @staticmethod - def _create_cube(data, src, x_dim, y_dim, src_x_coord, src_y_coord, - grid_x_coord, grid_y_coord, sample_grid_x, sample_grid_y, - regrid_callback): - """ - Return a new Cube for the result of regridding the source Cube onto - the new grid. - - All the metadata and coordinates of the result Cube are copied from - the source Cube, with two exceptions: - - Grid dimension coordinates are copied from the grid Cube. - - Auxiliary coordinates which span the grid dimensions are - ignored, except where they provide a reference surface for an - :class:`iris.aux_factory.AuxCoordFactory`. - - Args: - - * data: - The regridded data as an N-dimensional NumPy array. - * src: - The source Cube. - * x_dim: - The X dimension within the source Cube. - * y_dim: - The Y dimension within the source Cube. - * src_x_coord: - The X :class:`iris.coords.DimCoord`. - * src_y_coord: - The Y :class:`iris.coords.DimCoord`. - * grid_x_coord: - The :class:`iris.coords.DimCoord` for the new grid's X - coordinate. - * grid_y_coord: - The :class:`iris.coords.DimCoord` for the new grid's Y - coordinate. - * sample_grid_x: - A 2-dimensional array of sample X values. - * sample_grid_y: - A 2-dimensional array of sample Y values. - * regrid_callback: - The routine that will be used to calculate the interpolated - values of any reference surfaces. - - Returns: - The new, regridded Cube. - - """ - # - # XXX: At the moment requires to be a static method as used by - # experimental regrid_area_weighted_rectilinear_src_and_grid - # - # Create a result cube with the appropriate metadata - result = iris.cube.Cube(data) - result.metadata = copy.deepcopy(src.metadata) - - # Copy across all the coordinates which don't span the grid. - # Record a mapping from old coordinate IDs to new coordinates, - # for subsequent use in creating updated aux_factories. - coord_mapping = {} - - def copy_coords(src_coords, add_method): - for coord in src_coords: - dims = src.coord_dims(coord) - if coord is src_x_coord: - coord = grid_x_coord - elif coord is src_y_coord: - coord = grid_y_coord - elif x_dim in dims or y_dim in dims: - continue - result_coord = coord.copy() - add_method(result_coord, dims) - coord_mapping[id(coord)] = result_coord - - copy_coords(src.dim_coords, result.add_dim_coord) - copy_coords(src.aux_coords, result.add_aux_coord) - - def regrid_reference_surface(src_surface_coord, surface_dims, - x_dim, y_dim, src_x_coord, src_y_coord, - sample_grid_x, sample_grid_y, - regrid_callback): - # Determine which of the reference surface's dimensions span the X - # and Y dimensions of the source cube. - surface_x_dim = surface_dims.index(x_dim) - surface_y_dim = surface_dims.index(y_dim) - surface = regrid_callback(src_surface_coord.points, - surface_x_dim, surface_y_dim, - src_x_coord, src_y_coord, - sample_grid_x, sample_grid_y) - surface_coord = src_surface_coord.copy(surface) - return surface_coord - - # Copy across any AuxFactory instances, and regrid their reference - # surfaces where required. 
- for factory in src.aux_factories: - for coord in six.itervalues(factory.dependencies): - if coord is None: - continue - dims = src.coord_dims(coord) - if x_dim in dims and y_dim in dims: - result_coord = regrid_reference_surface(coord, dims, - x_dim, y_dim, - src_x_coord, - src_y_coord, - sample_grid_x, - sample_grid_y, - regrid_callback) - result.add_aux_coord(result_coord, dims) - coord_mapping[id(coord)] = result_coord - try: - result.add_aux_factory(factory.updated(coord_mapping)) - except KeyError: - msg = 'Cannot update aux_factory {!r} because of dropped' \ - ' coordinates.'.format(factory.name()) - warnings.warn(msg) - return result - def _check_units(self, coord): + from iris.coord_systems import GeogCS, RotatedGeogCS + if coord.coord_system is None: # No restriction on units. pass - elif isinstance(coord.coord_system, - (iris.coord_systems.GeogCS, - iris.coord_systems.RotatedGeogCS)): + elif isinstance( + coord.coord_system, + (GeogCS, RotatedGeogCS), + ): # Units for lat-lon or rotated pole must be 'degrees'. Note # that 'degrees_east' etc. are equal to 'degrees'. - if coord.units != 'degrees': - msg = "Unsupported units for coordinate system. " \ - "Expected 'degrees' got {!r}.".format(coord.units) + if coord.units != "degrees": + msg = ( + "Unsupported units for coordinate system. " + "Expected 'degrees' got {!r}.".format(coord.units) + ) raise ValueError(msg) else: # Units for other coord systems must be equal to metres. - if coord.units != 'm': - msg = "Unsupported units for coordinate system. " \ - "Expected 'metres' got {!r}.".format(coord.units) + if coord.units != "m": + msg = ( + "Unsupported units for coordinate system. " + "Expected 'metres' got {!r}.".format(coord.units) + ) raise ValueError(msg) def __call__(self, src): - """ + """Regrid onto target grid. + Regrid this :class:`~iris.cube.Cube` on to the target grid of this :class:`RectilinearRegridder`. The given cube must be defined with the same grid as the source grid used to create this :class:`RectilinearRegridder`. - Args: + If the source cube has lazy data, the returned cube will also + have lazy data. - * src: + Parameters + ---------- + src : :class:`~iris.cube.Cube` A :class:`~iris.cube.Cube` to be regridded. - Returns: + Returns + ------- + :class:`~iris.cube.Cube` A cube defined with the horizontal dimensions of the target and the other dimensions from this cube. The data values of this cube will be converted to values on the new grid using either nearest-neighbour or linear interpolation. + Notes + ----- + .. note:: + + If the source cube has lazy data, + `chunks `__ + in the horizontal dimensions will be combined before regridding. + """ + from iris.cube import Cube + # Validity checks. - if not isinstance(src, iris.cube.Cube): + if not isinstance(src, Cube): raise TypeError("'src' must be a Cube") if get_xy_dim_coords(src) != self._src_grid: - raise ValueError('The given cube is not defined on the same ' - 'source grid as this regridder.') + raise ValueError( + "The given cube is not defined on the same " + "source grid as this regridder." 
+ ) src_x_coord, src_y_coord = get_xy_dim_coords(src) grid_x_coord, grid_y_coord = self._tgt_grid @@ -935,17 +905,23 @@ def __call__(self, src): grid_cs = grid_x_coord.coord_system if src_cs is None and grid_cs is None: - if not (src_x_coord.is_compatible(grid_x_coord) and - src_y_coord.is_compatible(grid_y_coord)): - raise ValueError("The rectilinear grid coordinates of the " - "given cube and target grid have no " - "coordinate system but they do not have " - "matching coordinate metadata.") + if not ( + src_x_coord.is_compatible(grid_x_coord) + and src_y_coord.is_compatible(grid_y_coord) + ): + raise ValueError( + "The rectilinear grid coordinates of the " + "given cube and target grid have no " + "coordinate system but they do not have " + "matching coordinate metadata." + ) elif src_cs is None or grid_cs is None: - raise ValueError("The rectilinear grid coordinates of the given " - "cube and target grid must either both have " - "coordinate systems or both have no coordinate " - "system but with matching coordinate metadata.") + raise ValueError( + "The rectilinear grid coordinates of the given " + "cube and target grid must either both have " + "coordinate systems or both have no coordinate " + "system but with matching coordinate metadata." + ) # Check the source grid units. for coord in (src_x_coord, src_y_coord): @@ -958,18 +934,180 @@ def __call__(self, src): # Compute the interpolated data values. x_dim = src.coord_dims(src_x_coord)[0] y_dim = src.coord_dims(src_y_coord)[0] - data = self._regrid(src.data, x_dim, y_dim, - src_x_coord, src_y_coord, - sample_grid_x, sample_grid_y, - self._method, self._extrapolation_mode) + + data = map_complete_blocks( + src, + self._regrid, + (y_dim, x_dim), + sample_grid_x.shape, + x_dim=x_dim, + y_dim=y_dim, + src_x_coord=src_x_coord, + src_y_coord=src_y_coord, + sample_grid_x=sample_grid_x, + sample_grid_y=sample_grid_y, + method=self._method, + extrapolation_mode=self._extrapolation_mode, + ) # Wrap up the data as a Cube. - regrid_callback = functools.partial(self._regrid, - method=self._method, - extrapolation_mode='nan') - result = self._create_cube(data, src, x_dim, y_dim, - src_x_coord, src_y_coord, - grid_x_coord, grid_y_coord, - sample_grid_x, sample_grid_y, - regrid_callback) + _regrid_callback = functools.partial( + self._regrid, + src_x_coord=src_x_coord, + src_y_coord=src_y_coord, + sample_grid_x=sample_grid_x, + sample_grid_y=sample_grid_y, + method=self._method, + extrapolation_mode="nan", + ) + + def regrid_callback(*args, **kwargs): + _data, dims = args + return _regrid_callback(_data, *dims, **kwargs) + + result = _create_cube( + data, + src, + [x_dim, y_dim], + [grid_x_coord, grid_y_coord], + 2, + regrid_callback, + ) return result + + +def _create_cube(data, src, src_dims, tgt_coords, num_tgt_dims, regrid_callback): + r"""Return a new cube for the result of regridding. + + Returned cube represents the result of regridding the source cube + onto the horizontal coordinates (e.g. latitude) of the target cube. + All the metadata and coordinates of the result cube are copied from + the source cube, with two exceptions: + + * Horizontal coordinates are copied from the target cube. + * Auxiliary coordinates which span the grid dimensions are + ignored. + + Parameters + ---------- + data : array + The regridded data as an N-dimensional NumPy array. + src : cube + The source Cube. + src_dims : tuple of int + The dimensions of the X and Y coordinate within the source Cube. 
+ tgt_coords : tuple of :class:`iris.coords.Coord` + Either two 1D :class:`iris.coords.DimCoord`, or two n-D + :class:`iris.coords.AuxCoord` representing the new grid's + X and Y coordinates. + num_tgt_dims : int + The number of dimensions that the `tgt_coords` span. + regrid_callback : callable + The routine that will be used to calculate the interpolated + values of any reference surfaces. + + Returns + ------- + cube + A new iris.cube.Cube instance. + """ + from iris.coords import DimCoord + from iris.cube import Cube + + result = Cube(data) + + if len(src_dims) >= 2: + grid_dim_x, grid_dim_y = src_dims[:2] + elif len(src_dims) == 1: + grid_dim_x = src_dims[0] + grid_dim_y = grid_dim_x + 1 + + if num_tgt_dims == 1: + grid_dim_x = grid_dim_y = min(src_dims) + for tgt_coord, dim in zip(tgt_coords, (grid_dim_x, grid_dim_y)): + if len(tgt_coord.shape) == 1: + if isinstance(tgt_coord, DimCoord) and dim is not None: + result.add_dim_coord(tgt_coord, dim) + else: + result.add_aux_coord(tgt_coord, dim) + else: + result.add_aux_coord(tgt_coord, (grid_dim_y, grid_dim_x)) + + result.metadata = copy.deepcopy(src.metadata) + + # Copy across all the coordinates which don't span the grid. + # Record a mapping from old coordinate IDs to new coordinates, + # for subsequent use in creating updated aux_factories. + + coord_mapping = {} + + def copy_coords(src_coords, add_method): + for coord in src_coords: + dims = src.coord_dims(coord) + if set(src_dims).intersection(set(dims)): + continue + if guess_coord_axis(coord) in ["X", "Y"]: + continue + + def dim_offset(dim): + offset = sum( + [d <= dim for d in (grid_dim_x, grid_dim_y) if d is not None] + ) + if offset and num_tgt_dims == 1: + offset -= 1 + offset -= sum([d <= dim for d in src_dims if d is not None]) + return dim + offset + + dims = [dim_offset(dim) for dim in dims] + result_coord = coord.copy() + # Add result_coord to the owner of add_method. + add_method(result_coord, dims) + coord_mapping[id(coord)] = result_coord + + copy_coords(src.dim_coords, result.add_dim_coord) + copy_coords(src.aux_coords, result.add_aux_coord) + + def regrid_reference_surface( + src_surface_coord, + surface_dims, + src_dims, + regrid_callback, + ): + # Determine which of the reference surface's dimensions span the X + # and Y dimensions of the source cube. + relative_surface_dims = [ + surface_dims.index(dim) if dim is not None else None for dim in src_dims + ] + surface = regrid_callback( + src_surface_coord.points, + relative_surface_dims, + ) + surface_coord = src_surface_coord.copy(surface) + return surface_coord + + # Copy across any AuxFactory instances, and regrid their reference + # surfaces where required.
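The reference-surface step above is what keeps derived coordinates alive across a regrid. A hedged usage sketch, with hypothetical file names: a source cube carrying a hybrid-height aux factory retains its derived `altitude` coordinate, because the 2-D `surface_altitude` surface is itself regridded through `regrid_callback`::

    import iris
    from iris.analysis import Linear

    # Hypothetical files: 'theta.nc' holds a hybrid-height cube and
    # 'target.nc' a cube on the destination grid.
    src = iris.load_cube("theta.nc")
    tgt = iris.load_cube("target.nc")

    result = Linear().regridder(src, tgt)(src)

    # The 2-D surface_altitude reference surface was regridded too, so
    # the derived altitude coordinate is still available on the result.
    print(result.coord("altitude").shape)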
+ for factory in src.aux_factories: + for coord in factory.dependencies.values(): + if coord is None: + continue + dims = src.coord_dims(coord) + if set(src_dims).intersection(dims): + result_coord = regrid_reference_surface( + coord, + dims, + src_dims, + regrid_callback, + ) + result.add_aux_coord(result_coord, dims) + coord_mapping[id(coord)] = result_coord + try: + result.add_aux_factory(factory.updated(coord_mapping)) + except KeyError: + msg = ( + "Cannot update aux_factory {!r} because of dropped" + " coordinates.".format(factory.name()) + ) + warnings.warn(msg, category=IrisImpossibleUpdateWarning) + + return result diff --git a/lib/iris/analysis/_scipy_interpolate.py b/lib/iris/analysis/_scipy_interpolate.py index 996a2ad50e..251fb4bf70 100644 --- a/lib/iris/analysis/_scipy_interpolate.py +++ b/lib/iris/analysis/_scipy_interpolate.py @@ -1,11 +1,7 @@ -from __future__ import (absolute_import, division, print_function) -from six.moves import (filter, input, map, range, zip) # noqa - import itertools -from scipy.sparse import csr_matrix import numpy as np - +from scipy.sparse import csr_matrix # ============================================================================ # | Copyright SciPy | @@ -15,11 +11,10 @@ # Source: https://github.com/scipy/scipy/blob/b94a5d5ccc08dddbc88453477ff2625\ # 9aeaafb32/scipy/interpolate/interpnd.pyx#L167 -def _ndim_coords_from_arrays(points, ndim=None): - """ - Convert a tuple of coordinate arrays to a (..., ndim)-shaped array. - """ + +def _ndim_coords_from_arrays(points, ndim=None): + """Convert a tuple of coordinate arrays to a (..., ndim)-shaped array.""" if isinstance(points, tuple) and len(points) == 1: # handle argument tuple points = points[0] @@ -27,8 +22,7 @@ def _ndim_coords_from_arrays(points, ndim=None): p = np.broadcast_arrays(*points) for j in range(1, len(p)): if p[j].shape != p[0].shape: - raise ValueError( - "coordinate arrays do not have the same shape") + raise ValueError("coordinate arrays do not have the same shape") points = np.empty(p[0].shape + (len(points),), dtype=float) for j, item in enumerate(p): points[..., j] = item @@ -45,10 +39,8 @@ def _ndim_coords_from_arrays(points, ndim=None): # source: https://github.com/scipy/scipy/blob/b94a5d5ccc08dddbc88453477ff2625\ # 9aeaafb32/scipy/interpolate/interpolate.py#L1400 -class _RegularGridInterpolator(object): - - """ - Interpolation on a regular grid in arbitrary dimensions +class _RegularGridInterpolator: + """Interpolation on a regular grid in arbitrary dimensions. The data must be defined on a regular grid; the grid spacing however may be uneven. Linear and nearest-neighbour interpolation are supported. After @@ -91,51 +83,62 @@ class _RegularGridInterpolator(object): regular grid structure. 
""" + # this class is based on code originally programmed by Johannes Buchner, # see https://github.com/JohannesBuchner/regulargrid - def __init__(self, points, values, method="linear", bounds_error=True, - fill_value=np.nan): + def __init__( + self, + points, + values, + method="linear", + bounds_error=True, + fill_value=np.nan, + ): if method not in ["linear", "nearest"]: raise ValueError("Method '%s' is not defined" % method) self.method = method self.bounds_error = bounds_error - if not hasattr(values, 'ndim'): + if not hasattr(values, "ndim"): # allow reasonable duck-typed values values = np.asarray(values) if len(points) > values.ndim: - raise ValueError("There are %d point arrays, but values has %d " - "dimensions" % (len(points), values.ndim)) + raise ValueError( + "There are %d point arrays, but values has %d " + "dimensions" % (len(points), values.ndim) + ) - if hasattr(values, 'dtype') and hasattr(values, 'astype'): + if hasattr(values, "dtype") and hasattr(values, "astype"): if not np.issubdtype(values.dtype, np.inexact): values = values.astype(float) self.fill_value = fill_value if fill_value is not None: - if hasattr(values, 'dtype') and not np.can_cast(fill_value, - values.dtype): - raise ValueError("fill_value must be either 'None' or " - "of a type compatible with values") + if hasattr(values, "dtype") and not np.can_cast(fill_value, values.dtype): + raise ValueError( + "fill_value must be either 'None' or " + "of a type compatible with values" + ) for i, p in enumerate(points): - if not np.all(np.diff(p) > 0.): - raise ValueError("The points in dimension %d must be strictly " - "ascending" % i) + if not np.all(np.diff(p) > 0.0): + raise ValueError( + "The points in dimension %d must be strictly ascending" % i + ) if not np.asarray(p).ndim == 1: - raise ValueError("The points in dimension %d must be " - "1-dimensional" % i) + raise ValueError("The points in dimension %d must be 1-dimensional" % i) if not values.shape[i] == len(p): - raise ValueError("There are %d points and %d values in " - "dimension %d" % (len(p), values.shape[i], i)) + raise ValueError( + "There are %d points and %d values in " + "dimension %d" % (len(p), values.shape[i], i) + ) self.grid = tuple([np.asarray(p) for p in points]) self.values = values def __call__(self, xi, method=None): - """ - Interpolation at coordinates + """Interpolation at coordinates. Parameters ---------- @@ -153,8 +156,7 @@ def __call__(self, xi, method=None): return self.interp_using_pre_computed_weights(weights) def compute_interp_weights(self, xi, method=None): - """ - Prepare the interpolator for interpolation to the given sample points. + """Prepare the interpolator for interpolation to the given sample points. .. 
note:: This interface provides the ability to reuse weights on multiple @@ -186,60 +188,67 @@ def compute_interp_weights(self, xi, method=None): ndim = len(self.grid) xi = _ndim_coords_from_arrays(xi, ndim=ndim) if xi.shape[-1] != ndim: - raise ValueError("The requested sample points xi have dimension " - "%d, but this RegularGridInterpolator has " - "dimension %d" % (xi.shape[1], ndim)) + raise ValueError( + "The requested sample points xi have dimension " + "%d, but this RegularGridInterpolator has " + "dimension %d" % (xi.shape[1], ndim) + ) xi_shape = xi.shape xi = xi.reshape(-1, xi_shape[-1]) if self.bounds_error: for i, p in enumerate(xi.T): - if not np.logical_and(np.all(self.grid[i][0] <= p), - np.all(p <= self.grid[i][-1])): - raise ValueError("One of the requested xi is out of " - "bounds in dimension %d" % i) + if not np.logical_and( + np.all(self.grid[i][0] <= p), np.all(p <= self.grid[i][-1]) + ): + raise ValueError( + "One of the requested xi is out of " + "bounds in dimension %d" % i + ) method = self.method if method is None else method prepared = (xi_shape, method) + self._find_indices(xi.T) - if method == 'linear': - + if method == "linear": xi_shape, method, indices, norm_distances, out_of_bounds = prepared # Allocate arrays for describing the sparse matrix. - n_src_values_per_result_value = 2 ** ndim + n_src_values_per_result_value = 2**ndim n_result_values = len(indices[0]) n_non_zero = n_result_values * n_src_values_per_result_value weights = np.ones(n_non_zero, dtype=norm_distances[0].dtype) col_indices = np.empty(n_non_zero) - row_ptrs = np.arange(0, n_non_zero + n_src_values_per_result_value, - n_src_values_per_result_value) - - corners = itertools.product(*[[(i, 1 - n), (i + 1, n)] - for i, n in zip(indices, - norm_distances)]) + row_ptrs = np.arange( + 0, + n_non_zero + n_src_values_per_result_value, + n_src_values_per_result_value, + ) + + corners = itertools.product( + *[[(i, 1 - n), (i + 1, n)] for i, n in zip(indices, norm_distances)] + ) shape = self.values.shape[:ndim] for i, corner in enumerate(corners): corner_indices = [ci for ci, cw in corner] - n_indices = np.ravel_multi_index(corner_indices, shape, - mode='wrap') + n_indices = np.ravel_multi_index(corner_indices, shape, mode="wrap") col_indices[i::n_src_values_per_result_value] = n_indices for ci, cw in corner: weights[i::n_src_values_per_result_value] *= cw n_src_values = np.prod(list(map(len, self.grid))) - sparse_matrix = csr_matrix((weights, col_indices, row_ptrs), - shape=(n_result_values, n_src_values)) + sparse_matrix = csr_matrix( + (weights, col_indices, row_ptrs), + shape=(n_result_values, n_src_values), + ) prepared = (xi_shape, method, sparse_matrix, None, out_of_bounds) return prepared def interp_using_pre_computed_weights(self, computed_weights): - """ - Perform the interpolation using pre-computed interpolation weights. + """Perform the interpolation using pre-computed interpolation weights. .. note:: This interface provides the ability to reuse weights on multiple @@ -254,8 +263,13 @@ def interp_using_pre_computed_weights(self, computed_weights): :meth:`compute_interp_weights`. 
""" - [xi_shape, method, indices, norm_distances, - out_of_bounds] = computed_weights + [ + xi_shape, + method, + indices, + norm_distances, + out_of_bounds, + ] = computed_weights method = self.method if method is None else method if method not in ["linear", "nearest"]: @@ -266,8 +280,7 @@ def interp_using_pre_computed_weights(self, computed_weights): if method == "linear": result = self._evaluate_linear_sparse(indices) elif method == "nearest": - result = self._evaluate_nearest( - indices, norm_distances, out_of_bounds) + result = self._evaluate_nearest(indices, norm_distances, out_of_bounds) if not self.bounds_error and self.fill_value is not None: result[out_of_bounds] = self.fill_value @@ -286,7 +299,7 @@ def _evaluate_linear_sparse(self, sparse_matrix): def _evaluate_nearest(self, indices, norm_distances, out_of_bounds): idx_res = [] for i, yi in zip(indices, norm_distances): - idx_res.append(np.where(yi <= .5, i, i + 1)) + idx_res.append(np.where(yi <= 0.5, i, i + 1)) return self.values[tuple(idx_res)] def _find_indices(self, xi): @@ -306,13 +319,13 @@ def _find_indices(self, xi): if grid.size == 1: norm_distances.append(x - grid[i]) else: - norm_distances.append((x - grid[i]) / - (grid[i + 1] - grid[i])) + norm_distances.append((x - grid[i]) / (grid[i + 1] - grid[i])) if not self.bounds_error: out_of_bounds += x < grid[0] out_of_bounds += x > grid[-1] return indices, norm_distances, out_of_bounds + # ============================================================================ # | END SciPy copyright | # ============================================================================ diff --git a/lib/iris/analysis/calculus.py b/lib/iris/analysis/calculus.py index fb9f1f31e1..4cb634efbe 100644 --- a/lib/iris/analysis/calculus.py +++ b/lib/iris/analysis/calculus.py @@ -1,51 +1,36 @@ -# (C) British Crown Copyright 2010 - 2019, Met Office +# Copyright Iris contributors # -# This file is part of Iris. -# -# Iris is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the -# Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Iris is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with Iris. If not, see . -""" -Calculus operations on :class:`iris.cube.Cube` instances. +# This file is part of Iris and is released under the BSD license. +# See LICENSE in the root of the repository for full licensing details. +"""Calculus operations on :class:`iris.cube.Cube` instances. See also: :mod:`NumPy `. 
""" -from __future__ import (absolute_import, division, print_function) -from six.moves import (filter, input, map, range, zip) # noqa -import six - import re import warnings import cf_units import numpy as np -import iris.cube -import iris.coords -import iris.coord_systems import iris.analysis +from iris.analysis.cartography import ( + DEFAULT_SPHERICAL_EARTH_RADIUS, + DEFAULT_SPHERICAL_EARTH_RADIUS_UNIT, +) import iris.analysis.maths -from iris.analysis.cartography import (DEFAULT_SPHERICAL_EARTH_RADIUS, - DEFAULT_SPHERICAL_EARTH_RADIUS_UNIT) +import iris.coord_systems +import iris.coords from iris.util import delta +from iris.warnings import IrisUserWarning - -__all__ = ['cube_delta', 'differentiate', 'curl'] +__all__ = ["DIRECTIONAL_NAMES", "cube_delta", "curl", "differentiate"] def _construct_delta_coord(coord): - """ + """Return a coordinate of deltas between the given coordinate's points. + Return a coordinate of deltas between the given coordinate's points. If the original coordinate has length n and is circular then the result will be a coordinate of length n, otherwise the result will be @@ -54,10 +39,11 @@ def _construct_delta_coord(coord): """ if coord.ndim != 1: raise iris.exceptions.CoordinateMultiDimError(coord) - circular = getattr(coord, 'circular', False) + circular = getattr(coord, "circular", False) if coord.shape == (1,) and not circular: - raise ValueError('Cannot take interval differences of a single ' - 'valued coordinate.') + raise ValueError( + "Cannot take interval differences of a single valued coordinate." + ) if circular: circular_kwd = coord.units.modulus or True @@ -71,36 +57,43 @@ def _construct_delta_coord(coord): points = iris.util.delta(coord.points, 0, circular=circular_kwd) new_coord = iris.coords.AuxCoord.from_coord(coord).copy(points, bounds) - new_coord.rename('change_in_%s' % new_coord.name()) + new_coord.rename("change_in_%s" % new_coord.name()) return new_coord def _construct_midpoint_coord(coord, circular=None): - """ + """Return a coordinate of mid-points from the given coordinate. + Return a coordinate of mid-points from the given coordinate. If the given coordinate has length n and the circular flag set then the result will be a coordinate of length n, otherwise the result will be of length n-1. """ - if circular and not hasattr(coord, 'circular'): - msg = ("Cannot produce a circular midpoint for the '{}' coord, " - "which does not have a 'circular' attribute.") + if circular and not hasattr(coord, "circular"): + msg = ( + "Cannot produce a circular midpoint for the '{}' coord, " + "which does not have a 'circular' attribute." + ) raise ValueError(msg.format(coord.name())) if circular is None: - circular = getattr(coord, 'circular', False) - elif circular != getattr(coord, 'circular', False): - msg = ("Construction coordinate midpoints for the '{}' coordinate, " - "though it has the attribute 'circular'={}.") - warnings.warn(msg.format(circular, coord.circular, coord.name())) + circular = getattr(coord, "circular", False) + elif circular != getattr(coord, "circular", False): + msg = ( + "Construction coordinate midpoints for the '{}' coordinate, " + "though it has the attribute 'circular'={}." 
+ ) + warnings.warn( + msg.format(circular, coord.name(), coord.circular), + category=IrisUserWarning, + ) if coord.ndim != 1: raise iris.exceptions.CoordinateMultiDimError(coord) if coord.shape == (1,) and not circular: - raise ValueError('Cannot take the midpoints of a single valued ' - 'coordinate.') + raise ValueError("Cannot take the midpoints of a single valued coordinate.") # Calculate the delta of the coordinate # (this deals with circularity nicely). @@ -124,38 +117,44 @@ def _construct_midpoint_coord(coord, circular=None): # Try creating a coordinate of the same type as before, otherwise, # make an AuxCoord. try: - mid_point_coord = coord.from_coord(coord).copy(mid_point_points, - mid_point_bounds) + mid_point_coord = coord.from_coord(coord).copy( + mid_point_points, mid_point_bounds + ) except ValueError: mid_point_coord = iris.coords.AuxCoord.from_coord(coord).copy( - mid_point_points, mid_point_bounds) + mid_point_points, mid_point_bounds + ) return mid_point_coord def cube_delta(cube, coord): - """ - Given a cube calculate the difference between each value in the - given coord's direction. + """Given a cube, calculate the difference between each value in the coord's direction. - - Args: - - * coord - either a Coord instance or the unique name of a coordinate in the cube. + Parameters + ---------- + coord : + Either a Coord instance or the unique name of a coordinate in the cube. If a Coord instance is provided, it does not necessarily have to exist in the cube. - Example usage:: + Examples + -------- + :: - change_in_temperature_wrt_pressure = \ -cube_delta(temperature_cube, 'pressure') + change_in_temperature_wrt_pressure = cube_delta(temperature_cube, 'pressure') + Notes + ----- .. note:: Missing data support not yet implemented. + .. note:: + This function does not maintain laziness when called; it realises data. + See more at :doc:`/userguide/real_and_lazy_data`. + """ # handle the case where a user passes a coordinate name - if isinstance(coord, six.string_types): + if isinstance(coord, str): coord = cube.coord(coord) if coord.ndim != 1: @@ -163,19 +162,24 @@ def cube_delta(cube, coord): # Try and get a coord dim delta_dims = cube.coord_dims(coord.name()) - if ((coord.shape[0] == 1 and not getattr(coord, 'circular', False)) or - not delta_dims): - raise ValueError('Cannot calculate delta over {!r} as it has ' - 'length of 1.'.format(coord.name())) + if ( + coord.shape[0] == 1 and not getattr(coord, "circular", False) + ) or not delta_dims: + raise ValueError( + "Cannot calculate delta over {!r} as it has length of 1.".format( + coord.name() + ) + ) delta_dim = delta_dims[0] # Calculate the actual delta, taking into account whether the given # coordinate is circular. - delta_cube_data = delta(cube.data, delta_dim, - circular=getattr(coord, 'circular', False)) + delta_cube_data = delta( + cube.data, delta_dim, circular=getattr(coord, "circular", False) + ) # If the coord/dim is circular there is no change in cube shape - if getattr(coord, 'circular', False): + if getattr(coord, "circular", False): delta_cube = cube.copy(data=delta_cube_data) else: # Subset the cube to the appropriate new shape by knocking off @@ -188,29 +192,34 @@ def cube_delta(cube, coord): # Replace the delta_dim coords with midpoints # (no shape change if circular).
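A small worked example of the `cube_delta` semantics implemented here, with illustrative names and values: n points along the chosen coordinate yield n-1 differences, positioned at the interval mid-points::

    import numpy as np
    from iris.coords import DimCoord
    from iris.cube import Cube
    from iris.analysis.calculus import cube_delta

    pressure = DimCoord([1000.0, 850.0, 700.0, 500.0],
                        long_name="pressure", units="hPa")
    cube = Cube(np.array([288.0, 280.0, 272.0, 260.0]),
                long_name="temperature", units="K",
                dim_coords_and_dims=[(pressure, 0)])

    delta = cube_delta(cube, "pressure")
    print(delta.name())                    # change_in_temperature_wrt_pressure
    print(delta.coord("pressure").points)  # mid-points: [925. 775. 600.]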
for cube_coord in cube.coords(dimensions=delta_dim): - delta_cube.replace_coord(_construct_midpoint_coord( - cube_coord, circular=getattr(coord, 'circular', False))) + delta_cube.replace_coord( + _construct_midpoint_coord( + cube_coord, circular=getattr(coord, "circular", False) + ) + ) - delta_cube.rename('change_in_{}_wrt_{}'.format(delta_cube.name(), - coord.name())) + delta_cube.rename("change_in_{}_wrt_{}".format(delta_cube.name(), coord.name())) return delta_cube def differentiate(cube, coord_to_differentiate): - r""" + r"""Calculate the differential of a given cube. + Calculate the differential of a given cube with respect to the coord_to_differentiate. - Args: - - * coord_to_differentiate: + Parameters + ---------- + coord_to_differentiate : Either a Coord instance or the unique name of a coordinate which exists in the cube. If a Coord instance is provided, it does not necessarily have to exist on the cube. - Example usage:: + Examples + -------- + :: u_wind_acceleration = differentiate(u_wind_cube, 'forecast_time') @@ -244,17 +253,22 @@ def differentiate(cube, coord_to_differentiate): where `c` and `b` represent the input coordinate values and bounds, and `C` and `B` the output coordinate values and bounds. - .. note:: Difference method used is the same as :func:`cube_delta` + .. note:: + Difference method used is the same as :func:`cube_delta` and therefore has the same limitations. .. note:: Spherical differentiation does not occur in this routine. + .. note:: + This function does not maintain laziness when called; it realises data. + See more at :doc:`/userguide/real_and_lazy_data`. + """ # Get the delta cube in the required differential direction. # This operation results in a copy of the original cube. delta_cube = cube_delta(cube, coord_to_differentiate) - if isinstance(coord_to_differentiate, six.string_types): + if isinstance(coord_to_differentiate, str): coord = cube.coord(coord_to_differentiate) else: coord = coord_to_differentiate @@ -266,22 +280,24 @@ def differentiate(cube, coord_to_differentiate): delta_cube = iris.analysis.maths.divide(delta_cube, delta_coord, delta_dim) # Update the standard name - delta_cube.rename('derivative_of_{}_wrt_{}'.format(cube.name(), - coord.name())) + delta_cube.rename("derivative_of_{}_wrt_{}".format(cube.name(), coord.name())) return delta_cube def _curl_subtract(a, b): - """ + """Straightforward wrapper to :func:`iris.analysis.maths.subtract`. + Simple wrapper to :func:`iris.analysis.maths.subtract` to subtract two cubes, which deals with None in a way that makes sense in the context of curl. """ + from iris.cube import Cube + # We are definitely dealing with cubes or None - otherwise we have a # programmer error... - assert isinstance(a, iris.cube.Cube) or a is None - assert isinstance(b, iris.cube.Cube) or b is None + assert isinstance(a, Cube) or a is None + assert isinstance(b, Cube) or b is None if a is None and b is None: return None @@ -295,14 +311,17 @@ def _curl_subtract(a, b): def _curl_differentiate(cube, coord): - """ + """Straightforward wrapper to :func:`differentiate`. + Simple wrapper to :func:`differentiate` to differentiate a cube and deal with None in a way that makes sense in the context of curl. """ + from iris.cube import Cube + # We are definitely dealing with cubes/coords or None - otherwise we # have a programmer error...
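To make the `differentiate` behaviour concrete, a hedged sketch with an illustrative linear temperature profile, for which the derivative is constant::

    import numpy as np
    from iris.coords import DimCoord
    from iris.cube import Cube
    from iris.analysis.calculus import differentiate

    height = DimCoord(np.arange(4) * 100.0, standard_name="height", units="m")
    cube = Cube(288.0 - 0.0065 * height.points,
                standard_name="air_temperature", units="K",
                dim_coords_and_dims=[(height, 0)])

    deriv = differentiate(cube, "height")
    print(deriv.name())  # derivative_of_air_temperature_wrt_height
    print(deriv.data)    # approx. [-0.0065 -0.0065 -0.0065], in K m-1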
- assert isinstance(cube, iris.cube.Cube) or cube is None + assert isinstance(cube, Cube) or cube is None assert isinstance(coord, iris.coords.Coord) or coord is None if cube is None: @@ -316,15 +335,18 @@ def _curl_differentiate(cube, coord): def _curl_regrid(cube, prototype): - """ + """Straightforward wrapper to :meth:`iris.cube.Cube.regridded`. + Simple wrapper to :meth:`iris.cube.Cube.regridded` to deal with None in a way that makes sense in the context of curl. """ + from iris.cube import Cube + # We are definitely dealing with cubes or None - otherwise we have a # programmer error... - assert isinstance(cube, iris.cube.Cube) or cube is None - assert isinstance(prototype, iris.cube.Cube) + assert isinstance(cube, Cube) or cube is None + assert isinstance(prototype, Cube) if cube is None: result = None @@ -334,17 +356,20 @@ def _curl_regrid(cube, prototype): def _copy_cube_transformed(src_cube, data, coord_func): - """ + """Return a new cube with the given data and with the coordinates transformed. + Returns a new cube based on the src_cube, but with the given data, and with the coordinates transformed via coord_func. The data must have the same number of dimensions as the source cube. """ + from iris.cube import Cube + assert src_cube.ndim == data.ndim # Start with just the metadata and the data... - new_cube = iris.cube.Cube(data) + new_cube = Cube(data) new_cube.metadata = src_cube.metadata @@ -393,81 +418,83 @@ def coord_func(coord): def _coord_sin(coord): - """ - Return a coordinate which represents sin(coord). - - Args: + """Return a coordinate which represents sin(coord). - * coord - Coord instance with values in either degrees or radians + Parameters + ---------- + coord : + Coord instance with values in either degrees or radians. """ return _trig_method(coord, np.sin) def _coord_cos(coord): - """ - Return a coordinate which represents cos(coord). + """Return a coordinate which represents cos(coord). - Args: - - * coord - Coord instance with values in either degrees or radians + Parameters + ---------- + coord : + Coord instance with values in either degrees or radians. """ return _trig_method(coord, np.cos) def _trig_method(coord, trig_function): - """ - Return a coordinate which represents trig_function(coord). + """Return a coordinate which represents trig_function(coord). - Args: - - * coord - Coord instance with points values in either degrees or radians - * trig_function - Reference to a trigonometric function e.g. numpy.sin + Parameters + ---------- + coord : + Coord instance with points values in either degrees or radians. + trig_function : + Reference to a trigonometric function e.g. numpy.sin. """ # If we are in degrees create a copy that is in radians. - if coord.units == 'degrees': + if coord.units == "degrees": coord = coord.copy() - coord.convert_units('radians') + coord.convert_units("radians") trig_coord = iris.coords.AuxCoord.from_coord(coord) trig_coord.points = trig_function(coord.points) if coord.has_bounds(): trig_coord.bounds = trig_function(coord.bounds) - trig_coord.units = '1' - trig_coord.rename('{}({})'.format(trig_function.__name__, coord.name())) + trig_coord.units = "1" + trig_coord.rename("{}({})".format(trig_function.__name__, coord.name())) return trig_coord def curl(i_cube, j_cube, k_cube=None): - r""" + r"""Calculate the 2 or 3-dimensional spherical or cartesian curl. + Calculate the 2-dimensional or 3-dimensional spherical or cartesian curl of the given vector of cubes.
+ The cube standard names must match one of the combinations in + :data:`DIRECTIONAL_NAMES`. + As well as the standard x and y coordinates, this function requires each cube to possess a vertical or z-like coordinate (representing some form of height or pressure). This can be a scalar or dimension coordinate. - Args: - - * i_cube - The i cube of the vector to operate on - * j_cube - The j cube of the vector to operate on - - Kwargs: - - * k_cube - The k cube of the vector to operate on - - Return (i_cmpt_curl_cube, j_cmpt_curl_cube, k_cmpt_curl_cube) - + Parameters + ---------- + i_cube : + The i cube of the vector to operate on. + j_cube : + The j cube of the vector to operate on. + k_cube : optional + The k cube of the vector to operate on. + + Returns + ------- + List of cubes i_cmpt_curl_cube, j_cmpt_curl_cube, k_cmpt_curl_cube + + Notes + ----- If the k-cube is not passed in then the 2-dimensional curl will be calculated, yielding the result: [None, None, k_cube]. If the k-cube is passed in, the 3-dimensional curl will @@ -482,88 +509,92 @@ def curl(i_cube, j_cube, k_cube=None): GeogCS or RotatedGeogCS, the spherical curl will be calculated; otherwise the cartesian curl will be calculated: - Cartesian curl + * Cartesian curl + * When cartesian calculus is used, i_cube is the u component, + j_cube is the v component and k_cube is the w component. - When cartesian calculus is used, i_cube is the u component, - j_cube is the v component and k_cube is the w component. + The Cartesian curl is defined as: - The Cartesian curl is defined as: + .. math:: - .. math:: + \nabla\times \vec u = + (\frac{\delta w}{\delta y} - \frac{\delta v}{\delta z})\vec a_i + - + (\frac{\delta w}{\delta x} - \frac{\delta u}{\delta z})\vec a_j + + + (\frac{\delta v}{\delta x} - \frac{\delta u}{\delta y})\vec a_k - \nabla\times \vec u = - (\frac{\delta w}{\delta y} - \frac{\delta v}{\delta z})\vec a_i - - - (\frac{\delta w}{\delta x} - \frac{\delta u}{\delta z})\vec a_j - + - (\frac{\delta v}{\delta x} - \frac{\delta u}{\delta y})\vec a_k + * Spherical curl + * When spherical calculus is used, i_cube is the :math:`\phi` vector + component (e.g. eastward), j_cube is the :math:`\theta` component + (e.g. northward) and k_cube is the radial component. - Spherical curl + The spherical curl is defined as: - When spherical calculus is used, i_cube is the :math:`\phi` vector - component (e.g. eastward), j_cube is the :math:`\theta` component - (e.g. northward) and k_cube is the radial component. + .. math:: - The spherical curl is defined as: + \nabla\times \vec A = \frac{1}{r cos \theta} + (\frac{\delta}{\delta \theta} + (\vec A_\phi cos \theta) - + \frac{\delta \vec A_\theta}{\delta \phi}) \vec r + + \frac{1}{r}(\frac{1}{cos \theta} + \frac{\delta \vec A_r}{\delta \phi} - + \frac{\delta}{\delta r} (r \vec A_\phi))\vec \theta + + \frac{1}{r} + (\frac{\delta}{\delta r}(r \vec A_\theta) - + \frac{\delta \vec A_r}{\delta \theta}) \vec \phi - .. math:: + where phi is longitude, theta is latitude. - \nabla\times \vec A = \frac{1}{r cos \theta} - (\frac{\delta}{\delta \theta} - (\vec A_\phi cos \theta) - - \frac{\delta \vec A_\theta}{\delta \phi}) \vec r + - \frac{1}{r}(\frac{1}{cos \theta} - \frac{\delta \vec A_r}{\delta \phi} - - \frac{\delta}{\delta r} (r \vec A_\phi))\vec \theta + - \frac{1}{r} - (\frac{\delta}{\delta r}(r \vec A_\theta) - - \frac{\delta \vec A_r}{\delta \theta}) \vec \phi + .. note:: - where phi is longitude, theta is latitude. + This function does not maintain laziness when called; it realises data. 
+ See more at :doc:`/userguide/real_and_lazy_data`. """ # Get the vector quantity names. # (i.e. ['easterly', 'northerly', 'vertical']) - vector_quantity_names, phenomenon_name = \ - spatial_vectors_with_phenom_name(i_cube, j_cube, k_cube) + vector_quantity_names, phenomenon_name = spatial_vectors_with_phenom_name( + i_cube, j_cube, k_cube + ) cubes = filter(None, [i_cube, j_cube, k_cube]) # get the names of all coords binned into useful comparison groups - coord_comparison = iris.analysis.coord_comparison(*cubes) + coord_comparison = iris.analysis._dimensional_metadata_comparison(*cubes) - bad_coords = coord_comparison['ungroupable_and_dimensioned'] + bad_coords = coord_comparison["ungroupable_and_dimensioned"] if bad_coords: - raise ValueError("Coordinates found in one cube that describe " - "a data dimension which weren't in the other " - "cube ({}), try removing this coordinate.".format( - ', '.join(group.name() for group in bad_coords))) - - bad_coords = coord_comparison['resamplable'] + raise ValueError( + "Coordinates found in one cube that describe " + "a data dimension which weren't in the other " + "cube ({}), try removing this coordinate.".format( + ", ".join(group.name() for group in bad_coords) + ) + ) + + bad_coords = coord_comparison["resamplable"] if bad_coords: - raise ValueError('Some coordinates are different ({}), consider ' - 'resampling.'.format( - ', '.join(group.name() for group in bad_coords))) - - ignore_string = '' - if coord_comparison['ignorable']: - ignore_string = ' (ignoring {})'.format( - ', '.join(group.name() for group in bad_coords)) + raise ValueError( + "Some coordinates are different ({}), consider resampling.".format( + ", ".join(group.name() for group in bad_coords) + ) + ) # Get the dim_coord, or None if none exist, for the xyz dimensions - x_coord = i_cube.coord(axis='X') - y_coord = i_cube.coord(axis='Y') - z_coord = i_cube.coord(axis='Z') + x_coord = i_cube.coord(axis="X") + y_coord = i_cube.coord(axis="Y") + z_coord = i_cube.coord(axis="Z") y_dim = i_cube.coord_dims(y_coord)[0] - horiz_cs = i_cube.coord_system('CoordSystem') + horiz_cs = i_cube.coord_system("CoordSystem") # Non-spherical coords? 
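The isinstance check that follows this comment is what selects the curl flavour: a GeogCS or RotatedGeogCS coordinate system on the i-cube picks the spherical formulae, anything else the cartesian ones. A hedged usage sketch (placeholder file name; the cubes are assumed to carry a vertical coordinate and standard names matching one of the recognised direction triples)::

    import iris
    from iris.analysis.calculus import curl

    u = iris.load_cube("winds.nc", "x_wind")
    v = iris.load_cube("winds.nc", "y_wind")

    # 2-D case: with no k cube the result is [None, None, k_component].
    i_curl, j_curl, k_curl = curl(u, v)
    print(k_curl.name())  # e.g. 'z curl of wind'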
- spherical_coords = isinstance(horiz_cs, (iris.coord_systems.GeogCS, - iris.coord_systems.RotatedGeogCS)) + spherical_coords = isinstance( + horiz_cs, (iris.coord_systems.GeogCS, iris.coord_systems.RotatedGeogCS) + ) if not spherical_coords: - # TODO Implement some mechanism for conforming to a common grid dj_dx = _curl_differentiate(j_cube, x_coord) prototype_diff = dj_dx @@ -600,8 +631,8 @@ def curl(i_cube, j_cube, k_cube=None): di_dy = _curl_differentiate(i_cube, y_coord) di_dy = _curl_regrid(di_dy, prototype_diff) # Since prototype_diff == dj_dx we don't need to recalculate dj_dx -# dj_dx = _curl_differentiate(j_cube, x_coord) -# dj_dx = _curl_regrid(dj_dx, prototype_diff) + # dj_dx = _curl_differentiate(j_cube, x_coord) + # dj_dx = _curl_regrid(dj_dx, prototype_diff) k_cmpt = _curl_subtract(dj_dx, di_dy) di_dy = dj_dx = None @@ -615,10 +646,14 @@ def curl(i_cube, j_cube, k_cube=None): # (d/dtheta (i_cube * sin(lat)) - d_j_cube_dphi) # phi_cmpt = 1/r * ( d/dr (r * j_cube) - d_k_cube_dtheta) # theta_cmpt = 1/r * ( 1/cos(lat) * d_k_cube_dphi - d/dr (r * i_cube) - if y_coord.name() not in ['latitude', 'grid_latitude'] \ - or x_coord.name() not in ['longitude', 'grid_longitude']: - raise ValueError('Expecting latitude as the y coord and ' - 'longitude as the x coord for spherical curl.') + if y_coord.name() not in [ + "latitude", + "grid_latitude", + ] or x_coord.name() not in ["longitude", "grid_longitude"]: + raise ValueError( + "Expecting latitude as the y coord and " + "longitude as the x coord for spherical curl." + ) # Get the radius of the earth - and check for sphericity ellipsoid = horiz_cs @@ -628,20 +663,19 @@ def curl(i_cube, j_cube, k_cube=None): # TODO: Add a test for this r = ellipsoid.semi_major_axis r_unit = cf_units.Unit("m") - spherical = (ellipsoid.inverse_flattening == 0.0) + spherical = ellipsoid.inverse_flattening == 0.0 else: r = DEFAULT_SPHERICAL_EARTH_RADIUS r_unit = DEFAULT_SPHERICAL_EARTH_RADIUS_UNIT spherical = True if not spherical: - raise ValueError('Cannot take the curl over a non-spherical ' - 'ellipsoid.') + raise ValueError("Cannot take the curl over a non-spherical ellipsoid.") lon_coord = x_coord.copy() lat_coord = y_coord.copy() - lon_coord.convert_units('radians') - lat_coord.convert_units('radians') + lon_coord.convert_units("radians") + lat_coord.convert_units("radians") lat_cos_coord = _coord_cos(lat_coord) # TODO Implement some mechanism for conforming to a common grid @@ -654,12 +688,14 @@ def curl(i_cube, j_cube, k_cube=None): # recalculate dicos_dtheta. 
d_j_cube_dphi = _curl_differentiate(j_cube, lon_coord) d_j_cube_dphi = _curl_regrid(d_j_cube_dphi, prototype_diff) - new_lat_coord = d_j_cube_dphi.coord(axis='Y') + new_lat_coord = d_j_cube_dphi.coord(axis="Y") new_lat_cos_coord = _coord_cos(new_lat_coord) lat_dim = d_j_cube_dphi.coord_dims(new_lat_coord)[0] - r_cmpt = iris.analysis.maths.divide(_curl_subtract(d_j_cube_dphi, - dicos_dtheta), - r * new_lat_cos_coord, dim=lat_dim) + r_cmpt = iris.analysis.maths.divide( + _curl_subtract(d_j_cube_dphi, dicos_dtheta), + r * new_lat_cos_coord, + dim=lat_dim, + ) r_cmpt.units = r_cmpt.units / r_unit d_j_cube_dphi = dicos_dtheta = None @@ -673,7 +709,7 @@ def curl(i_cube, j_cube, k_cube=None): if drj_dr is None and d_k_cube_dtheta is None: phi_cmpt = None else: - phi_cmpt = 1/r * _curl_subtract(drj_dr, d_k_cube_dtheta) + phi_cmpt = 1 / r * _curl_subtract(drj_dr, d_k_cube_dtheta) phi_cmpt.units = phi_cmpt.units / r_unit drj_dr = d_k_cube_dtheta = None @@ -682,8 +718,7 @@ def curl(i_cube, j_cube, k_cube=None): d_k_cube_dphi = _curl_differentiate(k_cube, lon_coord) d_k_cube_dphi = _curl_regrid(d_k_cube_dphi, prototype_diff) if d_k_cube_dphi is not None: - d_k_cube_dphi = iris.analysis.maths.divide(d_k_cube_dphi, - lat_cos_coord) + d_k_cube_dphi = iris.analysis.maths.divide(d_k_cube_dphi, lat_cos_coord) dri_dr = _curl_differentiate(r * i_cube, z_coord) if dri_dr is not None: dri_dr.units = dri_dr.units * r_unit @@ -691,7 +726,7 @@ def curl(i_cube, j_cube, k_cube=None): if d_k_cube_dphi is None and dri_dr is None: theta_cmpt = None else: - theta_cmpt = 1/r * _curl_subtract(d_k_cube_dphi, dri_dr) + theta_cmpt = 1 / r * _curl_subtract(d_k_cube_dphi, dri_dr) theta_cmpt.units = theta_cmpt.units / r_unit d_k_cube_dphi = dri_dr = None @@ -699,17 +734,33 @@ def curl(i_cube, j_cube, k_cube=None): for direction, cube in zip(vector_quantity_names, result): if cube is not None: - cube.rename('%s curl of %s' % (direction, phenomenon_name)) + cube.rename("%s curl of %s" % (direction, phenomenon_name)) return result +#: Acceptable X-Y-Z standard name combinations that +#: :func:`curl` can use (via :func:`spatial_vectors_with_phenom_name`). +DIRECTIONAL_NAMES: tuple[tuple[str, str, str], ...] = ( + ("u", "v", "w"), + ("x", "y", "z"), + ("i", "j", "k"), + ("eastward", "northward", "upward"), + ("easterly", "northerly", "vertical"), + ("easterly", "northerly", "radial"), +) + + def spatial_vectors_with_phenom_name(i_cube, j_cube, k_cube=None): - """ + """Given spatially dependent cubes, return a list of the spatial coordinate names. + Given 2 or 3 spatially dependent cubes, return a list of the spatial coordinate names with appropriate phenomenon name. - This routine is designed to identify the vector quantites which each + The cube standard names must match one of the combinations in + :data:`DIRECTIONAL_NAMES`. + + This routine is designed to identify the vector quantities which each of the cubes provided represent and return a list of their 3d spatial dimension names and associated phenomenon. For example, given a cube of "u wind" and "v wind" the return value @@ -719,51 +770,58 @@ def spatial_vectors_with_phenom_name(i_cube, j_cube, k_cube=None): #doctest: +SKIP (['u', 'v', 'w'], 'wind') - """ - directional_names = (('u', 'v', 'w'), ('x', 'y', 'z'), ('i', 'j', 'k'), - ('eastward', 'northward', 'upward'), - ('easterly', 'northerly', 'vertical'), - ('easterly', 'northerly', 'radial')) + Notes + ----- + This function maintains laziness when called; it does not realise data. 
+ See more at :doc:`/userguide/real_and_lazy_data`. + + """ # Create a list of the standard_names of our incoming cubes # (excluding the k_cube if it is None). - cube_standard_names = [cube.name() for cube in (i_cube, j_cube, k_cube) - if cube is not None] + cube_standard_names = [ + cube.name() for cube in (i_cube, j_cube, k_cube) if cube is not None + ] # Define a regular expr which represents (direction, phenomenon) # from the standard name of a cube. # e.g from "w wind" -> ("w", "wind") - vector_qty = re.compile(r'([^\W_]+)[\W_]+(.*)') + vector_qty = re.compile(r"([^\W_]+)[\W_]+(.*)") # Make a dictionary of {direction: phenomenon quantity} cube_directions, cube_phenomena = zip( - *[re.match(vector_qty, std_name).groups() - for std_name in cube_standard_names]) + *[re.match(vector_qty, std_name).groups() for std_name in cube_standard_names] + ) # Check that there is only one distinct phenomenon if len(set(cube_phenomena)) != 1: - raise ValueError('Vector phenomenon name not consistent between ' - 'vector cubes. Got cube phenomena: {}; from ' - 'standard names: {}.'.format( - ', '.join(cube_phenomena), - ', '.join(cube_standard_names))) + raise ValueError( + "Vector phenomenon name not consistent between " + "vector cubes. Got cube phenomena: {}; from " + "standard names: {}.".format( + ", ".join(cube_phenomena), ", ".join(cube_standard_names) + ) + ) # Get the appropriate direction list from the cube_directions we # have got from the standard name. direction = None - for possible_direction in directional_names: + for possible_direction in DIRECTIONAL_NAMES: # If this possible direction (minus the k_cube if it is none) # matches direction from the given cubes use it. - if possible_direction[0:len(cube_directions)] == cube_directions: + if possible_direction[0 : len(cube_directions)] == cube_directions: direction = possible_direction # If we didn't get a match, raise an Exception if direction is None: - direction_string = '; '.join(', '.join(possible_direction) - for possible_direction - in directional_names) - raise ValueError('{} are not recognised vector cube_directions. ' - 'Possible cube_directions are: {}.'.format( - cube_directions, direction_string)) + direction_string = "; ".join( + ", ".join(possible_direction) for possible_direction in DIRECTIONAL_NAMES + ) + raise ValueError( + "{} are not recognised vector cube_directions. " + "Possible cube_directions are: {}.".format( + cube_directions, direction_string + ) + ) return (direction, cube_phenomena[0]) diff --git a/lib/iris/analysis/cartography.py b/lib/iris/analysis/cartography.py index c1cb09e179..d3967dfef3 100644 --- a/lib/iris/analysis/cartography.py +++ b/lib/iris/analysis/cartography.py @@ -1,93 +1,94 @@ -# (C) British Crown Copyright 2010 - 2019, Met Office +# Copyright Iris contributors # -# This file is part of Iris. -# -# Iris is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the -# Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Iris is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with Iris. If not, see <http://www.gnu.org/licenses/>. -""" -Various utilities and numeric transformations relevant to cartography.
- -""" - -from __future__ import (absolute_import, division, print_function) -from six.moves import (filter, input, map, range, zip) # noqa +# This file is part of Iris and is released under the BSD license. +# See LICENSE in the root of the repository for full licensing details. +"""Various utilities and numeric transformations relevant to cartography.""" from collections import namedtuple import copy import warnings +import cartopy.crs as ccrs +import cartopy.img_transform import cf_units +import dask.array as da import numpy as np import numpy.ma as ma -import cartopy.img_transform -import cartopy.crs as ccrs -import iris.analysis -import iris.coords import iris.coord_systems +import iris.coords import iris.exceptions from iris.util import _meshgrid +import iris.warnings + from ._grid_angles import gridcell_angles, rotate_grid_vectors # List of contents to control Sphinx autodocs. # Unfortunately essential to get docs for the grid_angles functions. __all__ = [ - 'area_weights', - 'cosine_latitude_weights', - 'get_xy_contiguous_bounded_grids', - 'get_xy_grids', - 'gridcell_angles', - 'project', - 'rotate_grid_vectors', - 'rotate_pole', - 'rotate_winds', - 'unrotate_pole', - 'wrap_lons', - 'DistanceDifferential', - 'PartialDifferential'] + "DistanceDifferential", + "PartialDifferential", + "area_weights", + "cosine_latitude_weights", + "get_xy_contiguous_bounded_grids", + "get_xy_grids", + "gridcell_angles", + "project", + "rotate_grid_vectors", + "rotate_pole", + "rotate_winds", + "unrotate_pole", + "wrap_lons", +] # This value is used as a fall-back if the cube does not define the earth DEFAULT_SPHERICAL_EARTH_RADIUS = 6367470 # TODO: This should not be necessary, as CF is always in meters -DEFAULT_SPHERICAL_EARTH_RADIUS_UNIT = cf_units.Unit('m') +DEFAULT_SPHERICAL_EARTH_RADIUS_UNIT = cf_units.Unit("m") # Distance differentials for coordinate systems at specified locations -DistanceDifferential = namedtuple('DistanceDifferential', 'dx1 dy1 dx2 dy2') +DistanceDifferential = namedtuple("DistanceDifferential", "dx1 dy1 dx2 dy2") # Partial differentials between coordinate systems -PartialDifferential = namedtuple('PartialDifferential', 'dx1 dy1') +PartialDifferential = namedtuple("PartialDifferential", "dx1 dy1") def wrap_lons(lons, base, period): - """ - Wrap longitude values into the range between base and base+period. + """Wrap longitude values into the range between base and base+period. + Parameters + ---------- + lons : + base : + period : + + Examples + -------- .. testsetup:: import numpy as np from iris.analysis.cartography import wrap_lons - For example: + :: + >>> print(wrap_lons(np.array([185, 30, -200, 75]), -180, 360)) [-175. 30. 160. 75.] + Notes + ----- + This function maintains laziness when called; it does not realise data. + See more at :doc:`/userguide/real_and_lazy_data`. """ # It is important to use 64bit floating precision when changing a floats # numbers range. lons = lons.astype(np.float64) - return ((lons - base + period * 2) % period) + base + return ((lons - base) % period) + base def unrotate_pole(rotated_lons, rotated_lats, pole_lon, pole_lat): - """ + """Convert rotated-pole to unrotated longitudes and latitudes. + + ``pole_lat`` should describe the location of the rotated pole that + describes the arrays of rotated-pole longitudes and latitudes. + Convert arrays of rotated-pole longitudes and latitudes to unrotated arrays of longitudes and latitudes. 
The values of ``pole_lon`` and ``pole_lat`` should describe the location of the rotated pole that @@ -97,35 +98,35 @@ def unrotate_pole(rotated_lons, rotated_lats, pole_lon, pole_lat): rectilinear grid, the arrays of rotated-pole longitudes and latitudes must be of the same shape as each other. - Example:: - - lons, lats = unrotate_pole(rotated_lons, rotated_lats, \ - pole_lon, pole_lat) - .. note:: Uses proj.4 to perform the conversion. - Args: - - * rotated_lons: - An array of rotated-pole longitude values. - * rotated_lats: - An array of rotated-pole latitude values. - * pole_lon: - The longitude of the rotated pole that describes the arrays of - rotated-pole longitudes and latitudes. - * pole_lat: - The latitude of the rotated pole that describes the arrays of - rotated-pole longitudes and latitudes. - - Returns: - An array of unrotated longitudes and an array of unrotated latitudes. + Parameters + ---------- + rotated_lons : + An array of rotated-pole longitude values. + rotated_lats : + An array of rotated-pole latitude values. + pole_lon : + The longitude of the rotated pole that describes the arrays of + rotated-pole longitudes and latitudes. + pole_lat : + The latitude of the rotated pole that describes the arrays of + rotated-pole longitudes and latitudes. + + Returns + ------- + An array of unrotated longitudes and an array of unrotated latitudes. + + Examples + -------- + :: + + lons, lats = unrotate_pole(rotated_lons, rotated_lats, pole_lon, pole_lat) """ - src_proj = ccrs.RotatedGeodetic(pole_longitude=pole_lon, - pole_latitude=pole_lat) + src_proj = ccrs.RotatedGeodetic(pole_longitude=pole_lon, pole_latitude=pole_lat) target_proj = ccrs.Geodetic() - res = target_proj.transform_points(x=rotated_lons, y=rotated_lats, - src_crs=src_proj) + res = target_proj.transform_points(x=rotated_lons, y=rotated_lats, src_crs=src_proj) unrotated_lon = res[..., 0] unrotated_lat = res[..., 1] @@ -133,9 +134,9 @@ def unrotate_pole(rotated_lons, rotated_lats, pole_lon, pole_lat): def rotate_pole(lons, lats, pole_lon, pole_lat): - """ - Convert arrays of longitudes and latitudes to arrays of rotated-pole - longitudes and latitudes. The values of ``pole_lon`` and ``pole_lat`` + """Convert unrotated longitudes and latitudes to rotated-pole. + + The values of ``pole_lon`` and ``pole_lat`` should describe the rotated pole that the arrays of longitudes and latitudes are to be rotated onto. @@ -143,36 +144,35 @@ def rotate_pole(lons, lats, pole_lon, pole_lat): the arrays of rotated-pole longitudes and latitudes must be of the same shape as each other. - Example:: - - rotated_lons, rotated_lats = rotate_pole(lons, lats,\ - pole_lon, pole_lat) - .. note:: Uses proj.4 to perform the conversion. - Args: - - * lons: - An array of longitude values. - * lats: - An array of latitude values. - * pole_lon: - The longitude of the rotated pole that the arrays of longitudes and - latitudes are to be rotated onto. - * pole_lat: - The latitude of the rotated pole that the arrays of longitudes and - latitudes are to be rotated onto. - - Returns: - An array of rotated-pole longitudes and an array of rotated-pole - latitudes. + Parameters + ---------- + lons : + An array of longitude values. + lats : + An array of latitude values. + pole_lon : + The longitude of the rotated pole that the arrays of longitudes and + latitudes are to be rotated onto. + pole_lat : + The latitude of the rotated pole that the arrays of longitudes and + latitudes are to be rotated onto. 
+ + Returns + ------- + An array of rotated-pole longitudes and an array of rotated-pole latitudes. + + Examples + -------- + :: + + rotated_lons, rotated_lats = rotate_pole(lons, lats, pole_lon, pole_lat) """ src_proj = ccrs.Geodetic() - target_proj = ccrs.RotatedGeodetic(pole_longitude=pole_lon, - pole_latitude=pole_lat) - res = target_proj.transform_points(x=lons, y=lats, - src_crs=src_proj) + target_proj = ccrs.RotatedGeodetic(pole_longitude=pole_lon, pole_latitude=pole_lat) + res = target_proj.transform_points(x=lons, y=lats, src_crs=src_proj) rotated_lon = res[..., 0] rotated_lat = res[..., 1] @@ -180,54 +180,59 @@ def rotate_pole(lons, lats, pole_lon, pole_lat): def _get_lon_lat_coords(cube): - lat_coords = [coord for coord in cube.coords() - if "latitude" in coord.name()] - lon_coords = [coord for coord in cube.coords() - if "longitude" in coord.name()] + def search_for_coord(coord_iterable, coord_name): + return [coord for coord in coord_iterable if coord_name in coord.name()] + + lat_coords = search_for_coord(cube.dim_coords, "latitude") or search_for_coord( + cube.coords(), "latitude" + ) + lon_coords = search_for_coord(cube.dim_coords, "longitude") or search_for_coord( + cube.coords(), "longitude" + ) if len(lat_coords) > 1 or len(lon_coords) > 1: raise ValueError( - "Calling `_get_lon_lat_coords` with multiple lat or lon coords" - " is currently disallowed") + "Calling `_get_lon_lat_coords` with multiple same-type (i.e. dim/aux) lat or lon coords" + " is currently disallowed" + ) lat_coord = lat_coords[0] lon_coord = lon_coords[0] - return (lon_coord, lat_coord) + return lon_coord, lat_coord def _xy_range(cube, mode=None): - """ - Return the x & y range of this Cube. - - Args: - - * cube - The cube for which to calculate xy extents. + """Return the x & y range of this Cube. - Kwargs: - - * mode - If the coordinate has bounds, set this to specify the - min/max calculation. - Set to iris.coords.POINT_MODE or iris.coords.BOUND_MODE. + Parameters + ---------- + cube : + The cube for which to calculate xy extents. + mode : optional + If the coordinate has bounds, set this to specify the + min/max calculation. + Set to iris.coords.POINT_MODE or iris.coords.BOUND_MODE. """ # Helpful error if we have an inappropriate CoordSystem cs = cube.coord_system("CoordSystem") - cs_valid_types = (iris.coord_systems.GeogCS, - iris.coord_systems.RotatedGeogCS) - if ((cs is not None) and not isinstance(cs, cs_valid_types)): - raise ValueError( - "Latlon coords cannot be found with {0}.".format(type(cs))) + cs_valid_types = ( + iris.coord_systems.GeogCS, + iris.coord_systems.RotatedGeogCS, + ) + if (cs is not None) and not isinstance(cs, cs_valid_types): + raise ValueError("Latlon coords cannot be found with {0}.".format(type(cs))) x_coord, y_coord = cube.coord(axis="X"), cube.coord(axis="Y") - cs = cube.coord_system('CoordSystem') + cs = cube.coord_system("CoordSystem") if x_coord.has_bounds() != y_coord.has_bounds(): raise ValueError( - 'Cannot get the range of the x and y coordinates if they do ' - 'not have the same presence of bounds.') + "Cannot get the range of the x and y coordinates if they do " + "not have the same presence of bounds." 
+ ) if x_coord.has_bounds(): if mode not in [iris.coords.POINT_MODE, iris.coords.BOUND_MODE]: - raise ValueError( - 'When the coordinate has bounds, please specify "mode".') + raise ValueError('When the coordinate has bounds, please specify "mode".') _mode = mode else: _mode = iris.coords.POINT_MODE @@ -247,7 +252,7 @@ def _xy_range(cube, mode=None): y = y_coord.bounds # Get the x and y range - if getattr(x_coord, 'circular', False): + if getattr(x_coord, "circular", False): x_range = (np.min(x), np.min(x) + x_coord.units.modulus) else: x_range = (np.min(x), np.max(x)) @@ -258,17 +263,24 @@ def _xy_range(cube, mode=None): def get_xy_grids(cube): - """ - Return 2D X and Y points for a given cube. - - Args: + """Return 2D X and Y points for a given cube. - * cube - The cube for which to generate 2D X and Y points. + Parameters + ---------- + cube : + The cube for which to generate 2D X and Y points. - Example:: + Examples + -------- + :: x, y = get_xy_grids(cube) + Notes + ----- + This function maintains laziness when called; it does not realise data. + See more at :doc:`/userguide/real_and_lazy_data`. + """ x_coord, y_coord = cube.coord(axis="X"), cube.coord(axis="Y") @@ -288,15 +300,25 @@ def get_xy_grids(cube): def get_xy_contiguous_bounded_grids(cube): - """ - Return 2d arrays for x and y bounds. + """Return 2d arrays for x and y bounds. Returns array of shape (n+1, m+1). - Example:: + Parameters + ---------- + cube : :class:`iris.cube.Cube` + + Examples + -------- + :: xs, ys = get_xy_contiguous_bounded_grids(cube) + Notes + ----- + This function maintains laziness when called; it does not realise data. + See more at :doc:`/userguide/real_and_lazy_data`. + """ x_coord, y_coord = cube.coord(axis="X"), cube.coord(axis="Y") @@ -310,16 +332,11 @@ def get_xy_contiguous_bounded_grids(cube): def _quadrant_area(radian_lat_bounds, radian_lon_bounds, radius_of_earth): """Calculate spherical segment areas. - - radian_lat_bounds -- [n,2] array of latitude bounds (radians) - - radian_lon_bounds -- [n,2] array of longitude bounds (radians) - - radius_of_earth -- radius of the earth - (currently assumed spherical) - Area weights are calculated for each lat/lon cell as: - .. math:: + .. math:: - r^2 (lon_1 - lon_0) ( sin(lat_1) - sin(lat_0)) + r^2 (lon_1 - lon_0) ( sin(lat_1) - sin(lat_0)) The resulting array will have a shape of *(radian_lat_bounds.shape[0], radian_lon_bounds.shape[0])* @@ -327,16 +344,27 @@ def _quadrant_area(radian_lat_bounds, radian_lon_bounds, radius_of_earth): The calculations are done at 64 bit precision and the returned array will be of type numpy.float64. + Parameters + ---------- + radian_lat_bounds : + [n,2] array of latitude bounds (radians). + radian_lon_bounds : + [n,2] array of longitude bounds (radians). + radius_of_earth : + Radius of the earth (currently assumed spherical). 
+ """ # ensure pairs of bounds - if (radian_lat_bounds.shape[-1] != 2 or - radian_lon_bounds.shape[-1] != 2 or - radian_lat_bounds.ndim != 2 or - radian_lon_bounds.ndim != 2): + if ( + radian_lat_bounds.shape[-1] != 2 + or radian_lon_bounds.shape[-1] != 2 + or radian_lat_bounds.ndim != 2 + or radian_lon_bounds.ndim != 2 + ): raise ValueError("Bounds must be [n,2] array") # fill in a new array of areas - radius_sqr = radius_of_earth ** 2 + radius_sqr = radius_of_earth**2 radian_lat_64 = radian_lat_bounds.astype(np.float64) radian_lon_64 = radian_lon_bounds.astype(np.float64) @@ -348,58 +376,78 @@ def _quadrant_area(radian_lat_bounds, radian_lon_bounds, radius_of_earth): return np.abs(areas) -def area_weights(cube, normalize=False): - r""" - Returns an array of area weights, with the same dimensions as the cube. +def area_weights(cube, normalize=False, compute=True, chunks=None): + r"""Return an array of area weights, with the same dimensions as the cube. This is a 2D lat/lon area weights array, repeated over the non lat/lon dimensions. - Args: - - * cube (:class:`iris.cube.Cube`): - The cube to calculate area weights for. - - Kwargs: - - * normalize (False/True): - If False, weights are grid cell areas. If True, weights are grid - cell areas divided by the total grid area. - The cube must have coordinates 'latitude' and 'longitude' with bounds. Area weights are calculated for each lat/lon cell as: - .. math:: + .. math:: - r^2 (lon_1 - lon_0) (\sin(lat_1) - \sin(lat_0)) + r^2 (lon_1 - lon_0) (\sin(lat_1) - \sin(lat_0)) Currently, only supports a spherical datum. Uses earth radius from the cube, if present and spherical. Defaults to iris.analysis.cartography.DEFAULT_SPHERICAL_EARTH_RADIUS. + Parameters + ---------- + cube : :class:`iris.cube.Cube` + The cube to calculate area weights for. + normalize : bool, default=False + If False, weights are grid cell areas. If True, weights are grid + cell areas divided by the total grid area. + compute : bool, default=True + If False, return a lazy dask array. If True, return a numpy array. + chunks : tuple, optional + If compute is False and a value is provided, then the result will use + these chunks instead of the same chunks as the cube data. The values + provided here will only be used along dimensions that are not latitude + or longitude. 
+
+    Returns
+    -------
+    broad_weights :
+        An array of area weights, with the same shape as the cube.
+
     """
     # Get the radius of the earth
     cs = cube.coord_system("CoordSystem")
     if isinstance(cs, iris.coord_systems.GeogCS):
         if cs.inverse_flattening != 0.0:
-            warnings.warn("Assuming spherical earth from ellipsoid.")
+            warnings.warn(
+                "Assuming spherical earth from ellipsoid.",
+                category=iris.warnings.IrisDefaultingWarning,
+            )
         radius_of_earth = cs.semi_major_axis
-    elif (isinstance(cs, iris.coord_systems.RotatedGeogCS) and
-          (cs.ellipsoid is not None)):
+    elif isinstance(cs, iris.coord_systems.RotatedGeogCS) and (
+        cs.ellipsoid is not None
+    ):
         if cs.ellipsoid.inverse_flattening != 0.0:
-            warnings.warn("Assuming spherical earth from ellipsoid.")
+            warnings.warn(
+                "Assuming spherical earth from ellipsoid.",
+                category=iris.warnings.IrisDefaultingWarning,
+            )
         radius_of_earth = cs.ellipsoid.semi_major_axis
     else:
-        warnings.warn("Using DEFAULT_SPHERICAL_EARTH_RADIUS.")
+        warnings.warn(
+            "Using DEFAULT_SPHERICAL_EARTH_RADIUS.",
+            category=iris.warnings.IrisDefaultingWarning,
+        )
         radius_of_earth = DEFAULT_SPHERICAL_EARTH_RADIUS

     # Get the lon and lat coords and axes
     try:
         lon, lat = _get_lon_lat_coords(cube)
     except IndexError:
-        raise ValueError('Cannot get latitude/longitude '
-                         'coordinates from cube {!r}.'.format(cube.name()))
+        raise ValueError(
+            "Cannot get latitude/longitude coordinates from cube {!r}.".format(
+                cube.name()
+            )
+        )

     if lat.ndim > 1:
         raise iris.exceptions.CoordinateMultiDimError(lat)
@@ -413,8 +461,10 @@ def area_weights(cube, normalize=False):
     lon_dim = lon_dim[0] if lon_dim else None

     if not (lat.has_bounds() and lon.has_bounds()):
-        msg = ("Coordinates {!r} and {!r} must have bounds to determine "
-               "the area weights.".format(lat.name(), lon.name()))
+        msg = (
+            "Coordinates {!r} and {!r} must have bounds to determine "
+            "the area weights.".format(lat.name(), lon.name())
+        )
         raise ValueError(msg)

     # Convert from degrees to radians
@@ -422,19 +472,24 @@ def area_weights(cube, normalize=False):
     lon = lon.copy()

     for coord in (lat, lon):
-        if coord.units in (cf_units.Unit('degrees'),
-                           cf_units.Unit('radians')):
-            coord.convert_units('radians')
+        if coord.units in (cf_units.Unit("degrees"), cf_units.Unit("radians")):
+            coord.convert_units("radians")
         else:
-            msg = ("Units of degrees or radians required, coordinate "
-                   "{!r} has units: {!r}".format(coord.name(),
-                                                 coord.units.name))
+            msg = (
+                "Units of degrees or radians required, coordinate "
+                "{!r} has units: {!r}".format(coord.name(), coord.units.name)
+            )
             raise ValueError(msg)

     # Create 2D weights from bounds.
     # Use the geographical area as the weight for each cell
-    ll_weights = _quadrant_area(lat.bounds,
-                                lon.bounds, radius_of_earth)
+    if compute:
+        lat_bounds = lat.bounds
+        lon_bounds = lon.bounds
+    else:
+        lat_bounds = lat.lazy_bounds()
+        lon_bounds = lon.lazy_bounds()
+    ll_weights = _quadrant_area(lat_bounds, lon_bounds, radius_of_earth)

     # Normalize the weights if necessary.
     if normalize:
@@ -449,16 +504,17 @@ def area_weights(cube, normalize=False):
         if dim is not None:
             wshape.append(ll_weights.shape[idim])
     ll_weights = ll_weights.reshape(wshape)
-    broad_weights = iris.util.broadcast_to_shape(ll_weights,
-                                                 cube.shape,
-                                                 broadcast_dims)
+    broad_weights = iris.util.broadcast_to_shape(
+        ll_weights, cube.shape, broadcast_dims, chunks=chunks
+    )

     return broad_weights


 def cosine_latitude_weights(cube):
-    r"""
-    Returns an array of latitude weights, with the same dimensions as
+    r"""Calculate cosine latitude weights, with the same dimensions as
     the cube.
+ + Return an array of latitude weights, with the same dimensions as the cube. The weights are the cosine of latitude. These are n-dimensional latitude weights repeated over the dimensions @@ -470,12 +526,16 @@ def cosine_latitude_weights(cube): Weights are calculated for each latitude as: - .. math:: + .. math:: - w_l = \cos \phi_l + w_l = \cos \phi_l - Examples: + Parameters + ---------- + cube : :class:`iris.cube.Cube` + Examples + -------- Compute weights suitable for averaging type operations:: from iris.analysis.cartography import cosine_latitude_weights @@ -490,24 +550,28 @@ def cosine_latitude_weights(cube): cube = iris.load_cube(iris.sample_data_path('air_temp.pp')) weights = np.sqrt(cosine_latitude_weights(cube)) + Notes + ----- + This function maintains laziness when called; it does not realise data. + See more at :doc:`/userguide/real_and_lazy_data`. """ # Find all latitude coordinates, we want one and only one. - lat_coords = [coord for coord in cube.coords() - if "latitude" in coord.name()] + lat_coords = [coord for coord in cube.coords() if "latitude" in coord.name()] if len(lat_coords) > 1: raise ValueError("Multiple latitude coords are currently disallowed.") try: lat = lat_coords[0] except IndexError: - raise ValueError('Cannot get latitude ' - 'coordinate from cube {!r}.'.format(cube.name())) + raise ValueError( + "Cannot get latitude coordinate from cube {!r}.".format(cube.name()) + ) # Get the dimension position(s) of the latitude coordinate. lat_dims = cube.coord_dims(lat) # Convert to radians. lat = lat.copy() - lat.convert_units('radians') + lat.convert_units("radians") # Compute the weights as the cosine of latitude. In some cases, # particularly when working in 32-bit precision, the latitude values can @@ -516,13 +580,15 @@ def cosine_latitude_weights(cube): # warning if these are found. Then the cosine is computed and clipped to # the valid range [0, 1]. threshold = np.deg2rad(0.001) # small value for grid resolution - if np.any(lat.points < -np.pi / 2. - threshold) or \ - np.any(lat.points > np.pi / 2. + threshold): - warnings.warn('Out of range latitude values will be ' - 'clipped to the valid range.', - UserWarning) + if np.any(lat.points < -np.pi / 2.0 - threshold) or np.any( + lat.points > np.pi / 2.0 + threshold + ): + warnings.warn( + "Out of range latitude values will be clipped to the valid range.", + category=iris.warnings.IrisDefaultingWarning, + ) points = lat.points - l_weights = np.cos(points).clip(0., 1.) + l_weights = np.cos(points).clip(0.0, 1.0) # Create weights for each grid point. This operation handles adding extra # dimensions and also the order of the dimensions. @@ -532,16 +598,13 @@ def cosine_latitude_weights(cube): if dim is not None: wshape.append(l_weights.shape[idim]) l_weights = l_weights.reshape(wshape) - broad_weights = iris.util.broadcast_to_shape(l_weights, - cube.shape, - broadcast_dims) + broad_weights = iris.util.broadcast_to_shape(l_weights, cube.shape, broadcast_dims) return broad_weights def project(cube, target_proj, nx=None, ny=None): - """ - Nearest neighbour regrid to a specified target projection. + """Nearest neighbour regrid to a specified target projection. Return a new cube that is the result of projecting a cube with 1 or 2 dimensional latitude-longitude coordinates from its coordinate system into @@ -550,25 +613,34 @@ def project(cube, target_proj, nx=None, ny=None): prevent one from directly visualising the data, e.g. when the longitude and latitude are two dimensional and do not make up a regular grid. 
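A minimal usage sketch for `project` (an editor's illustration, not part of the change; it assumes the optional iris-sample-data package is installed)::

    import cartopy.crs as ccrs
    import iris
    from iris.analysis.cartography import project

    cube = iris.load_cube(iris.sample_data_path("air_temp.pp"))
    # Nearest-neighbour resampling onto a Robinson grid; `extent`
    # describes the bounds of the target projection.
    new_cube, extent = project(cube, ccrs.Robinson(), nx=400, ny=200)
    print(new_cube.coord("projection_x_coordinate").points.shape)  # (400,)
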
-    Args:
-    * cube
-        An instance of :class:`iris.cube.Cube`.
-    * target_proj
-        An instance of the Cartopy Projection class, or an instance of
-        :class:`iris.coord_systems.CoordSystem` from which a projection
-        will be obtained.
-    Kwargs:
-    * nx
-        Desired number of sample points in the x direction for a domain
-        covering the globe.
-    * ny
-        Desired number of sample points in the y direction for a domain
-        covering the globe.
-
-    Returns:
+    Parameters
+    ----------
+    cube : :class:`iris.cube.Cube`
+        An instance of :class:`iris.cube.Cube`.
+    target_proj : :class:`cartopy.crs.Projection` or :class:`iris.coord_systems.CoordSystem`
+        An instance of the Cartopy Projection class, or an instance of
+        :class:`iris.coord_systems.CoordSystem` from which a projection
+        will be obtained.
+    nx : optional
+        Desired number of sample points in the x direction for a domain
+        covering the globe.
+    ny : optional
+        Desired number of sample points in the y direction for a domain
+        covering the globe.
+
+    Returns
+    -------
+    tuple of (:class:`iris.cube.Cube`, list)
         An instance of :class:`iris.cube.Cube` and a list describing the
         extent of the projection.

+    Notes
+    -----
+    .. note::
+
+        If there are both dim and aux latitude-longitude coordinates, only
+        the dim coordinates will be used.
+
     .. note::

         This function assumes global data and will if necessary extrapolate
@@ -582,6 +654,11 @@ def project(cube, target_proj, nx=None, ny=None):
         resulting nearest neighbour values. If masked, the value in the
         resulting cube is set to 0.

+    .. note::
+
+        This function does not maintain laziness when called; it realises data.
+        See more at :doc:`/userguide/real_and_lazy_data`.
+
     .. warning::

         This function uses a nearest neighbour approach rather than any form
@@ -590,31 +667,46 @@ def project(cube, target_proj, nx=None, ny=None):
         on the statistics of the data e.g. the mean and standard deviation
         will not be preserved.

+    .. warning::
+
+        If the target projection is non-rectangular, e.g. Robinson, the target
+        grid may include points outside the boundary of the projection. The
+        latitude/longitude of such points may be unpredictable.
+
     """
     try:
         lon_coord, lat_coord = _get_lon_lat_coords(cube)
     except IndexError:
-        raise ValueError('Cannot get latitude/longitude '
-                         'coordinates from cube {!r}.'.format(cube.name()))
+        raise ValueError(
+            "Cannot get latitude/longitude coordinates from cube {!r}.".format(
+                cube.name()
+            )
+        )

     if lat_coord.coord_system != lon_coord.coord_system:
-        raise ValueError('latitude and longitude coords appear to have '
-                         'different coordinates systems.')
+        raise ValueError(
+            "latitude and longitude coords appear to have "
+            "different coordinate systems."
+        )

-    if lon_coord.units != 'degrees':
+    if lon_coord.units != "degrees":
         lon_coord = lon_coord.copy()
-        lon_coord.convert_units('degrees')
-    if lat_coord.units != 'degrees':
+        lon_coord.convert_units("degrees")
+    if lat_coord.units != "degrees":
         lat_coord = lat_coord.copy()
-        lat_coord.convert_units('degrees')
+        lat_coord.convert_units("degrees")

     # Determine source coordinate system
     if lat_coord.coord_system is None:
         # Assume WGS84 latlon if unspecified
-        warnings.warn('Coordinate system of latitude and longitude '
-                      'coordinates is not specified. Assuming WGS84 Geodetic.')
-        orig_cs = iris.coord_systems.GeogCS(semi_major_axis=6378137.0,
-                                            inverse_flattening=298.257223563)
+        warnings.warn(
+            "Coordinate system of latitude and longitude "
+            "coordinates is not specified. 
Assuming WGS84 Geodetic.", + category=iris.warnings.IrisDefaultingWarning, + ) + orig_cs = iris.coord_systems.GeogCS( + semi_major_axis=6378137.0, inverse_flattening=298.257223563 + ) else: orig_cs = lat_coord.coord_system @@ -641,12 +733,14 @@ def project(cube, target_proj, nx=None, ny=None): ny = source_x.shape[0] target_x, target_y, extent = cartopy.img_transform.mesh_projection( - target_proj, nx, ny) + target_proj, nx, ny + ) # Determine dimension mappings - expect either 1d or 2d if lat_coord.ndim != lon_coord.ndim: - raise ValueError("The latitude and longitude coordinates have " - "different dimensionality.") + raise ValueError( + "The latitude and longitude coordinates have different dimensionality." + ) latlon_ndim = lat_coord.ndim lon_dims = cube.coord_dims(lon_coord) @@ -657,16 +751,20 @@ def project(cube, target_proj, nx=None, ny=None): ydim = lat_dims[0] elif latlon_ndim == 2: if lon_dims != lat_dims: - raise ValueError("The 2d latitude and longitude coordinates " - "correspond to different dimensions.") + raise ValueError( + "The 2d latitude and longitude coordinates " + "correspond to different dimensions." + ) # If coords are 2d assume that grid is ordered such that x corresponds # to the last dimension (shortest stride). xdim = lon_dims[1] ydim = lon_dims[0] else: - raise ValueError('Expected the latitude and longitude coordinates ' - 'to have 1 or 2 dimensions, got {} and ' - '{}.'.format(lat_coord.ndim, lon_coord.ndim)) + raise ValueError( + "Expected the latitude and longitude coordinates " + "to have 1 or 2 dimensions, got {} and " + "{}.".format(lat_coord.ndim, lon_coord.ndim) + ) # Create array to store regridded data new_shape = list(cube.shape) @@ -683,32 +781,34 @@ def project(cube, target_proj, nx=None, ny=None): elif lat_coord.ndim == 2 and lon_coord.ndim == 2: slice_it = cube.slices(lat_coord) else: - raise ValueError('Expected the latitude and longitude coordinates ' - 'to have 1 or 2 dimensions, got {} and ' - '{}.'.format(lat_coord.ndim, lon_coord.ndim)) - -# # Mask out points outside of extent in source_cs - disabled until -# # a way to specify global/limited extent is agreed upon and code -# # is generalised to handle -180 to +180, 0 to 360 and >360 longitudes. -# source_desired_xy = source_cs.transform_points(target_proj, -# target_x.flatten(), -# target_y.flatten()) -# if np.any(source_x < 0.0) and np.any(source_x > 180.0): -# raise ValueError('Unable to handle range of longitude.') -# # This does not work in all cases e.g. lon > 360 -# if np.any(source_x > 180.0): -# source_desired_x = (source_desired_xy[:, 0].reshape(ny, nx) + -# 360.0) % 360.0 -# else: -# source_desired_x = source_desired_xy[:, 0].reshape(ny, nx) -# source_desired_y = source_desired_xy[:, 1].reshape(ny, nx) -# outof_extent_points = ((source_desired_x < source_x.min()) | -# (source_desired_x > source_x.max()) | -# (source_desired_y < source_y.min()) | -# (source_desired_y > source_y.max())) -# # Make array a mask by default (rather than a single bool) to allow mask -# # to be assigned to slices. -# new_data.mask = np.zeros(new_shape) + raise ValueError( + "Expected the latitude and longitude coordinates " + "to have 1 or 2 dimensions, got {} and " + "{}.".format(lat_coord.ndim, lon_coord.ndim) + ) + + # # Mask out points outside of extent in source_cs - disabled until + # # a way to specify global/limited extent is agreed upon and code + # # is generalised to handle -180 to +180, 0 to 360 and >360 longitudes. 
+ # source_desired_xy = source_cs.transform_points(target_proj, + # target_x.flatten(), + # target_y.flatten()) + # if np.any(source_x < 0.0) and np.any(source_x > 180.0): + # raise ValueError('Unable to handle range of longitude.') + # # This does not work in all cases e.g. lon > 360 + # if np.any(source_x > 180.0): + # source_desired_x = (source_desired_xy[:, 0].reshape(ny, nx) + + # 360.0) % 360.0 + # else: + # source_desired_x = source_desired_xy[:, 0].reshape(ny, nx) + # source_desired_y = source_desired_xy[:, 1].reshape(ny, nx) + # outof_extent_points = ((source_desired_x < source_x.min()) | + # (source_desired_x > source_x.max()) | + # (source_desired_y < source_y.min()) | + # (source_desired_y > source_y.max())) + # # Make array a mask by default (rather than a single bool) to allow mask + # # to be assigned to slices. + # new_data.mask = np.zeros(new_shape) # Step through cube data, regrid onto desired projection and insert results # in new_data array @@ -718,14 +818,18 @@ def project(cube, target_proj, nx=None, ny=None): index[xdim] = slice(None, None) index[ydim] = slice(None, None) index = tuple(index) # Numpy>=1.16 : index with tuple, *not* list. - new_data[index] = cartopy.img_transform.regrid(ll_slice.data, - source_x, source_y, - source_cs, - target_proj, - target_x, target_y) - -# # Mask out points beyond extent -# new_data[index].mask[outof_extent_points] = True + new_data[index] = cartopy.img_transform.regrid( + ll_slice.data, + source_x, + source_y, + source_cs, + target_proj, + target_x, + target_y, + ) + + # # Mask out points beyond extent + # new_data[index].mask[outof_extent_points] = True # Remove mask if it is unnecessary if not np.any(new_data.mask): @@ -735,30 +839,40 @@ def project(cube, target_proj, nx=None, ny=None): new_cube = iris.cube.Cube(new_data) # Add new grid coords - x_coord = iris.coords.DimCoord(target_x[0, :], 'projection_x_coordinate', - units='m', - coord_system=copy.copy(target_cs)) - y_coord = iris.coords.DimCoord(target_y[:, 0], 'projection_y_coordinate', - units='m', - coord_system=copy.copy(target_cs)) + x_coord = iris.coords.DimCoord( + target_x[0, :], + "projection_x_coordinate", + units="m", + coord_system=copy.copy(target_cs), + ) + y_coord = iris.coords.DimCoord( + target_y[:, 0], + "projection_y_coordinate", + units="m", + coord_system=copy.copy(target_cs), + ) new_cube.add_dim_coord(x_coord, xdim) new_cube.add_dim_coord(y_coord, ydim) # Add resampled lat/lon in original coord system - source_desired_xy = source_cs.transform_points(target_proj, - target_x.flatten(), - target_y.flatten()) + source_desired_xy = source_cs.transform_points( + target_proj, target_x.flatten(), target_y.flatten() + ) new_lon_points = source_desired_xy[:, 0].reshape(ny, nx) new_lat_points = source_desired_xy[:, 1].reshape(ny, nx) - new_lon_coord = iris.coords.AuxCoord(new_lon_points, - standard_name='longitude', - units='degrees', - coord_system=orig_cs) - new_lat_coord = iris.coords.AuxCoord(new_lat_points, - standard_name='latitude', - units='degrees', - coord_system=orig_cs) + new_lon_coord = iris.coords.AuxCoord( + new_lon_points, + standard_name="longitude", + units="degrees", + coord_system=orig_cs, + ) + new_lat_coord = iris.coords.AuxCoord( + new_lat_points, + standard_name="latitude", + units="degrees", + coord_system=orig_cs, + ) new_cube.add_aux_coord(new_lon_coord, [ydim, xdim]) new_cube.add_aux_coord(new_lat_coord, [ydim, xdim]) @@ -773,11 +887,14 @@ def project(cube, target_proj, nx=None, ny=None): new_cube.add_aux_coord(coord.copy(), 
                               cube.coord_dims(coord))
     discarded_coords = coords_to_ignore.difference([lat_coord, lon_coord])
     if discarded_coords:
-        warnings.warn('Discarding coordinates that share dimensions with '
-                      '{} and {}: {}'.format(lat_coord.name(),
-                                             lon_coord.name(),
-                                             [coord.name() for
-                                              coord in discarded_coords]))
+        warnings.warn(
+            "Discarding coordinates that share dimensions with {} and {}: {}".format(
+                lat_coord.name(),
+                lon_coord.name(),
+                [coord.name() for coord in discarded_coords],
+            ),
+            category=iris.warnings.IrisIgnoringWarning,
+        )

     # TODO handle derived coords/aux_factories
@@ -788,19 +905,21 @@ def project(cube, target_proj, nx=None, ny=None):

 def _transform_xy(crs_from, x, y, crs_to):
-    """
-    Shorthand function to transform 2d points between coordinate
-    reference systems.
+    """Shorthand function to transform 2d points between coordinate reference systems.

-    Args:
-
-    * crs_from, crs_to (:class:`cartopy.crs.Projection`):
+    Parameters
+    ----------
+    crs_from : :class:`cartopy.crs.Projection`
+        The source coordinate reference system.
+    x, y : array
+        Point locations defined in 'crs_from'.
+    crs_to : :class:`cartopy.crs.Projection`
         The target coordinate reference system.
-    * x, y (arrays):
-        point locations defined in 'crs_from'.

-    Returns:
-        x, y : Arrays of locations defined in 'crs_to'.
+    Returns
+    -------
+    x, y
+        Arrays of locations defined in 'crs_to'.

     """
     pts = crs_to.transform_points(crs_from, x, y)
@@ -808,19 +927,23 @@ def _transform_xy(crs_from, x, y, crs_to):

 def _inter_crs_differentials(crs1, x, y, crs2):
-    """
-    Calculate coordinate partial differentials from crs1 to crs2.
+    """Calculate coordinate partial differentials from crs1 to crs2.

     Returns dx2/dx1, dy2/dx1, dx2/dy1 and dy2/dy1, at given locations.

-    Args:
-
-    * crs1, crs2 (`cartopy.crs.Projection`):
-        The coordinate systems, "from" and "to".
-    * x, y (array):
+    Parameters
+    ----------
+    crs1 : :class:`cartopy.crs.Projection`
+        The source ("from") coordinate system.
+    x, y : array
         Point locations defined in 'crs1'.
+    crs2 : :class:`cartopy.crs.Projection`
+        The target ("to") coordinate system.

-    Returns:
+    Returns
+    -------
+    arrays
         (dx2/dx1, dy2/dx1, dx2/dy1, dy2/dy1) at given locations.
         Each element of this tuple will be the same shape as the 'x' and
         'y' arrays and will be the partial differentials between the two
         systems.
@@ -859,61 +982,64 @@ def _inter_crs_differentials(crs1, x, y, crs2):

 def _crs_distance_differentials(crs, x, y):
-    """
+    """Calculate d(distance) / d(x) and ... / d(y).
+
     Calculate d(distance) / d(x) and ... / d(y) for a coordinate
     reference system at specified locations.

-    Args:
-
-    * crs (:class:`cartopy.crs.Projection`):
+    Parameters
+    ----------
+    crs : :class:`cartopy.crs.Projection`
         The coordinate reference system.
-    * x, y (array):
+    x, y : array
         Locations at which to calculate the differentials,
         defined in 'crs' coordinate reference system.

-    Returns:
-        (abs(ds/dx), abs(ds/dy)).
+    Returns
+    -------
+    (abs(ds/dx), abs(ds/dy))
         Numerically approximated partial differentials,
         i.e. scaling factors between changes in distance and changes in
         coordinate values.

     """
     # Make a true-latlon coordinate system for distance calculations.
-    crs_latlon = ccrs.Geodetic(globe=ccrs.Globe(ellipse='sphere'))
+    crs_latlon = ccrs.Geodetic(globe=crs.globe)
     # Transform points to true-latlon (just to get the true latitudes).
     _, true_lat = _transform_xy(crs, x, y, crs_latlon)
     # Get coordinate differentials w.r.t. true-latlon.
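# Editor's aside (illustrative only, not part of this change): the
# differentials computed here are numerical finite differences built on
# cartopy's transform_points. The same idea as a standalone sketch,
# estimating d(lon)/d(x) for an arbitrary rotated-pole CRS at one point:
import cartopy.crs as ccrs
import numpy as np

rotated = ccrs.RotatedPole(pole_longitude=177.5, pole_latitude=37.5)
latlon = ccrs.Geodetic()
x, y, dx = np.array([10.0]), np.array([50.0]), 1e-3
p0 = latlon.transform_points(rotated, x, y)
p1 = latlon.transform_points(rotated, x + dx, y)
dlon_dx = (p1[0, 0] - p0[0, 0]) / dx  # approx. degrees lon per rotated x unit
print(dlon_dx)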
-    dlon_dx, dlat_dx, dlon_dy, dlat_dy = \
-        _inter_crs_differentials(crs, x, y, crs_latlon)
+    dlon_dx, dlat_dx, dlon_dy, dlat_dy = _inter_crs_differentials(crs, x, y, crs_latlon)
     # Calculate effective scalings of X and Y coordinates.
-    lat_factor = np.cos(np.deg2rad(true_lat))**2
+    lat_factor = np.cos(np.deg2rad(true_lat)) ** 2
     ds_dx = np.sqrt(dlat_dx * dlat_dx + dlon_dx * dlon_dx * lat_factor)
     ds_dy = np.sqrt(dlat_dy * dlat_dy + dlon_dy * dlon_dy * lat_factor)
     return ds_dx, ds_dy


 def _transform_distance_vectors(u_dist, v_dist, ds, dx2, dy2):
-    """
+    """Transform distance vectors to another coordinate reference system.
+
     Transform distance vectors from one coordinate reference system to
     another, preserving magnitude and physical direction.

-    Args:
-
-    * u_dist, v_dist (array):
+    Parameters
+    ----------
+    u_dist, v_dist : array
         Components of each vector along the x and y directions of the source
         crs at each location.
-    * ds (`DistanceDifferential`):
+    ds : `DistanceDifferential`
         Distance differentials for the source and the target crs at specified
         locations.
-    * dx2, dy2 (`PartialDifferential`):
+    dx2, dy2 : `PartialDifferential`
         Partial differentials from the source to the target crs.

-    Returns:
+    Returns
+    -------
+    tuple (ut_dist, vt_dist):
         Tuple of arrays containing the vector components along the x and y
         directions of the target crs at each location.

     """
-
     # Scale input distance vectors --> source-coordinate differentials.
     u1, v1 = u_dist / ds.dx1, v_dist / ds.dy1
     # Transform vectors into the target system.
@@ -925,51 +1051,56 @@ def _transform_distance_vectors(u_dist, v_dist, ds, dx2, dy2):

     return u2_dist, v2_dist


-def _transform_distance_vectors_tolerance_mask(src_crs, x, y, tgt_crs,
-                                               ds, dx2, dy2):
-    """
+def _transform_distance_vectors_tolerance_mask(src_crs, x, y, tgt_crs, ds, dx2, dy2):
+    """Return a mask that can be applied to a data array to mask elements.
+
+    Return a mask that can be applied to a data array to mask elements
+    where the magnitudes of vectors are not preserved due to numerical
-    errors introduced by the tranformation between coordinate systems.
+    errors introduced by the transformation between coordinate systems.

-    Args:
-    * src_crs (`cartopy.crs.Projection`):
+    Parameters
+    ----------
+    src_crs : `cartopy.crs.Projection`
         The source coordinate reference system.
-    * x, y (array):
+    x, y : array
         Locations of each vector defined in 'src_crs'.
-    * tgt_crs (`cartopy.crs.Projection`):
+    tgt_crs : `cartopy.crs.Projection`
         The target coordinate reference system.
-    * ds (`DistanceDifferential`):
-        Distance differentials for src_crs and tgt_crs at specified locations
-    * dx2, dy2 (`PartialDifferential`):
+    ds : `DistanceDifferential`
+        Distance differentials for src_crs and tgt_crs at specified locations.
+    dx2, dy2 : `PartialDifferential`
         Partial differentials from src_crs to tgt_crs.

-    Returns:
-        2d boolean array that is the same shape as x and y.
+    Returns
+    -------
+    2d boolean array that is the same shape as x and y.

     """
     if x.shape != y.shape:
-        raise ValueError('Arrays do not have matching shapes. '
-                         'x.shape is {}, y.shape is {}.'.format(x.shape,
-                                                                y.shape))
+        raise ValueError(
+            "Arrays do not have matching shapes. "
+            "x.shape is {}, y.shape is {}.".format(x.shape, y.shape)
+        )
     ones = np.ones(x.shape)
     zeros = np.zeros(x.shape)
     u_one_t, v_zero_t = _transform_distance_vectors(ones, zeros, ds, dx2, dy2)
     u_zero_t, v_one_t = _transform_distance_vectors(zeros, ones, ds, dx2, dy2)
     # Squared magnitudes should be equal to one within acceptable tolerance.
-    # A value of atol=2e-3 is used, which corresponds to a change in magnitude
-    # of approximately 0.1%.
+    # A value of atol=2e-3 is used, which masks any magnitude change greater
+    # than approximately 0.5% (a tolerance arrived at by experiment).
     sqmag_1_0 = u_one_t**2 + v_zero_t**2
     sqmag_0_1 = u_zero_t**2 + v_one_t**2
     mask = np.logical_not(
-        np.logical_and(np.isclose(sqmag_1_0, ones, atol=2e-3),
-                       np.isclose(sqmag_0_1, ones, atol=2e-3)))
+        np.logical_and(
+            np.isclose(sqmag_1_0, ones, atol=2e-3),
+            np.isclose(sqmag_0_1, ones, atol=2e-3),
+        )
+    )

     return mask


 def rotate_winds(u_cube, v_cube, target_cs):
-    r"""
-    Transform wind vectors to a different coordinate system.
+    r"""Transform wind vectors to a different coordinate system.

     The input cubes contain U and V components parallel to the local X and Y
     directions of the input grid at each point.
@@ -983,23 +1114,27 @@ def rotate_winds(u_cube, v_cube, target_cs):
     also have two 2-dimensional auxiliary coordinates containing the X and
     Y locations in the target coordinate system.

-    Args:
-
-    * u_cube
+    Parameters
+    ----------
+    u_cube :
         An instance of :class:`iris.cube.Cube` that contains the x-component
         of the vector.
-    * v_cube
+    v_cube :
         An instance of :class:`iris.cube.Cube` that contains the y-component
         of the vector.
-    * target_cs
+    target_cs :
         An instance of :class:`iris.coord_systems.CoordSystem` that specifies
         the new grid directions.

-    Returns:
+    Returns
+    -------
+    tuple of :class:`iris.cube.Cube`
         A (u', v') tuple of :class:`iris.cube.Cube` instances that are the u
         and v components in the requested target coordinate system.
         The units are the same as the inputs.

+    Notes
+    -----
     .. note::

         The U and V values relate to distance, with units such as 'm s-1'.
@@ -1011,6 +1146,11 @@ def rotate_winds(u_cube, v_cube, target_cs):
         The names of the output cubes are those of the inputs, prefixed with
         'transformed\_' (e.g. 'transformed_x_wind').

+    .. note::
+
+        This function does not maintain laziness when called; it realises data.
+        See more at :doc:`/userguide/real_and_lazy_data`.
+
     .. warning::

         Conversion between rotated-pole and non-rotated systems can be
@@ -1023,29 +1163,43 @@ def rotate_winds(u_cube, v_cube, target_cs):
     # Check u_cube and v_cube have the same shape. We iterate through
     # the u and v cube slices which relies on the shapes matching.
     if u_cube.shape != v_cube.shape:
-        msg = 'Expected u and v cubes to have the same shape. ' \
-              'u cube has shape {}, v cube has shape {}.'
+        msg = (
+            "Expected u and v cubes to have the same shape. "
+            "u cube has shape {}, v cube has shape {}."
+        )
         raise ValueError(msg.format(u_cube.shape, v_cube.shape))

     # Check the u_cube and v_cube have the same x and y coords.
-    msg = 'Coordinates differ between u and v cubes. Coordinate {!r} from ' \
-          'u cube does not equal coordinate {!r} from v cube.'
-    if u_cube.coord(axis='x') != v_cube.coord(axis='x'):
-        raise ValueError(msg.format(u_cube.coord(axis='x').name(),
-                                    v_cube.coord(axis='x').name()))
-    if u_cube.coord(axis='y') != v_cube.coord(axis='y'):
-        raise ValueError(msg.format(u_cube.coord(axis='y').name(),
-                                    v_cube.coord(axis='y').name()))
+    msg = (
+        "Coordinates differ between u and v cubes. Coordinate {!r} from "
+        "u cube does not equal coordinate {!r} from v cube."
+ ) + if u_cube.coord(axis="x") != v_cube.coord(axis="x"): + raise ValueError( + msg.format(u_cube.coord(axis="x").name(), v_cube.coord(axis="x").name()) + ) + if u_cube.coord(axis="y") != v_cube.coord(axis="y"): + raise ValueError( + msg.format(u_cube.coord(axis="y").name(), v_cube.coord(axis="y").name()) + ) # Check x and y coords have the same coordinate system. - x_coord = u_cube.coord(axis='x') - y_coord = u_cube.coord(axis='y') + x_coord = u_cube.coord(axis="x") + y_coord = u_cube.coord(axis="y") if x_coord.coord_system != y_coord.coord_system: - msg = "Coordinate systems of x and y coordinates differ. " \ - "Coordinate {!r} has a coord system of {!r}, but coordinate " \ - "{!r} has a coord system of {!r}." - raise ValueError(msg.format(x_coord.name(), x_coord.coord_system, - y_coord.name(), y_coord.coord_system)) + msg = ( + "Coordinate systems of x and y coordinates differ. " + "Coordinate {!r} has a coord system of {!r}, but coordinate " + "{!r} has a coord system of {!r}." + ) + raise ValueError( + msg.format( + x_coord.name(), + x_coord.coord_system, + y_coord.name(), + y_coord.coord_system, + ) + ) # Convert from iris coord systems to cartopy CRSs to access # transform functionality. Use projection as cartopy @@ -1063,18 +1217,22 @@ def rotate_winds(u_cube, v_cube, target_cs): x = x_coord.points y = y_coord.points if x.ndim != y.ndim or x.ndim > 2 or y.ndim > 2: - msg = 'x and y coordinates must have the same number of dimensions ' \ - 'and be either 1D or 2D. The number of dimensions are {} and ' \ - '{}, respectively.'.format(x.ndim, y.ndim) + msg = ( + "x and y coordinates must have the same number of dimensions " + "and be either 1D or 2D. The number of dimensions are {} and " + "{}, respectively.".format(x.ndim, y.ndim) + ) raise ValueError(msg) # Check the dimension mappings match between u_cube and v_cube. if u_cube.coord_dims(x_coord) != v_cube.coord_dims(x_coord): - raise ValueError('Dimension mapping of x coordinate differs ' - 'between u and v cubes.') + raise ValueError( + "Dimension mapping of x coordinate differs between u and v cubes." + ) if u_cube.coord_dims(y_coord) != v_cube.coord_dims(y_coord): - raise ValueError('Dimension mapping of y coordinate differs ' - 'between u and v cubes.') + raise ValueError( + "Dimension mapping of y coordinate differs between u and v cubes." + ) x_dims = u_cube.coord_dims(x_coord) y_dims = u_cube.coord_dims(y_coord) @@ -1091,11 +1249,17 @@ def rotate_winds(u_cube, v_cube, target_cs): x = x.transpose() y = y.transpose() - # Create resulting cubes. - ut_cube = u_cube.copy() - vt_cube = v_cube.copy() - ut_cube.rename('transformed_{}'.format(u_cube.name())) - vt_cube.rename('transformed_{}'.format(v_cube.name())) + # Create resulting cubes - produce lazy output data if at least + # one input cube has lazy data + lazy_output = u_cube.has_lazy_data() or v_cube.has_lazy_data() + if lazy_output: + ut_cube = u_cube.copy(data=da.empty_like(u_cube.lazy_data())) + vt_cube = v_cube.copy(data=da.empty_like(v_cube.lazy_data())) + else: + ut_cube = u_cube.copy() + vt_cube = v_cube.copy() + ut_cube.rename("transformed_{}".format(u_cube.name())) + vt_cube.rename("transformed_{}".format(v_cube.name())) # Get distance scalings for source crs. ds_dx1, ds_dy1 = _crs_distance_differentials(src_crs, x, y) @@ -1107,22 +1271,26 @@ def rotate_winds(u_cube, v_cube, target_cs): ds = DistanceDifferential(ds_dx1, ds_dy1, ds_dx2, ds_dy2) # Calculate coordinate partial differentials from source crs to target crs. 
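# Editor's aside (illustrative only, not part of this change): an
# end-to-end use of rotate_winds, building a small rotated-pole wind pair
# and transforming it to true lat/lon directions. All names and values
# here are hypothetical.
import numpy as np
from iris.analysis.cartography import rotate_winds
from iris.coord_systems import GeogCS, RotatedGeogCS
from iris.coords import DimCoord
from iris.cube import Cube

sphere = GeogCS(6371229.0)
rot_cs = RotatedGeogCS(37.5, 177.5, ellipsoid=sphere)
glat = DimCoord(np.linspace(-10.0, 10.0, 5), standard_name="grid_latitude",
                units="degrees", coord_system=rot_cs)
glon = DimCoord(np.linspace(-10.0, 10.0, 5), standard_name="grid_longitude",
                units="degrees", coord_system=rot_cs)
u = Cube(np.ones((5, 5)), standard_name="x_wind", units="m s-1",
         dim_coords_and_dims=[(glat, 0), (glon, 1)])
v = Cube(np.zeros((5, 5)), standard_name="y_wind", units="m s-1",
         dim_coords_and_dims=[(glat.copy(), 0), (glon.copy(), 1)])
ut, vt = rotate_winds(u, v, sphere)
print(ut.name(), vt.name())  # transformed_x_wind transformed_y_wind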
- dx2_dx1, dy2_dx1, dx2_dy1, dy2_dy1 = _inter_crs_differentials(src_crs, - x, y, - target_crs) + dx2_dx1, dy2_dx1, dx2_dy1, dy2_dy1 = _inter_crs_differentials( + src_crs, x, y, target_crs + ) dx2 = PartialDifferential(dx2_dx1, dx2_dy1) dy2 = PartialDifferential(dy2_dx1, dy2_dy1) # Calculate mask based on preservation of magnitude. - mask = _transform_distance_vectors_tolerance_mask(src_crs, x, y, - target_crs, - ds, dx2, dy2) + mask = _transform_distance_vectors_tolerance_mask( + src_crs, x, y, target_crs, ds, dx2, dy2 + ) apply_mask = mask.any() if apply_mask: # Make masked arrays to accept masking. - ut_cube.data = ma.asanyarray(ut_cube.data) - vt_cube.data = ma.asanyarray(vt_cube.data) + if lazy_output: + ut_cube = ut_cube.copy(data=da.ma.empty_like(ut_cube.core_data())) + vt_cube = vt_cube.copy(data=da.ma.empty_like(vt_cube.core_data())) + else: + ut_cube.data = ma.asanyarray(ut_cube.data) + vt_cube.data = ma.asanyarray(vt_cube.data) # Project vectors with u, v components one horiz slice at a time and # insert into the resulting cubes. @@ -1135,16 +1303,20 @@ def rotate_winds(u_cube, v_cube, target_cs): for dim in dims: index[dim] = slice(None, None) index = tuple(index) - u = u_cube.data[index] - v = v_cube.data[index] + u = u_cube.core_data()[index] + v = v_cube.core_data()[index] ut, vt = _transform_distance_vectors(u, v, ds, dx2, dy2) if apply_mask: - ut = ma.asanyarray(ut) - ut[mask] = ma.masked - vt = ma.asanyarray(vt) - vt[mask] = ma.masked - ut_cube.data[index] = ut - vt_cube.data[index] = vt + if lazy_output: + ut = da.ma.masked_array(ut, mask=mask) + vt = da.ma.masked_array(vt, mask=mask) + else: + ut = ma.asanyarray(ut) + ut[mask] = ma.masked + vt = ma.asanyarray(vt) + vt[mask] = ma.masked + ut_cube.core_data()[index] = ut + vt_cube.core_data()[index] = vt # Calculate new coords of locations in target coordinate system. xyz_tran = target_crs.transform_points(src_crs, x, y) @@ -1158,18 +1330,20 @@ def rotate_winds(u_cube, v_cube, target_cs): xt = xt.transpose() yt = yt.transpose() - xt_coord = iris.coords.AuxCoord(xt, - standard_name='projection_x_coordinate', - coord_system=target_cs) - yt_coord = iris.coords.AuxCoord(yt, - standard_name='projection_y_coordinate', - coord_system=target_cs) + xt_coord = iris.coords.AuxCoord( + xt, standard_name="projection_x_coordinate", coord_system=target_cs + ) + yt_coord = iris.coords.AuxCoord( + yt, standard_name="projection_y_coordinate", coord_system=target_cs + ) # Set units based on coord_system. - if isinstance(target_cs, (iris.coord_systems.GeogCS, - iris.coord_systems.RotatedGeogCS)): - xt_coord.units = yt_coord.units = 'degrees' + if isinstance( + target_cs, + (iris.coord_systems.GeogCS, iris.coord_systems.RotatedGeogCS), + ): + xt_coord.units = yt_coord.units = "degrees" else: - xt_coord.units = yt_coord.units = 'm' + xt_coord.units = yt_coord.units = "m" ut_cube.add_aux_coord(xt_coord, dims) ut_cube.add_aux_coord(yt_coord, dims) diff --git a/lib/iris/analysis/geometry.py b/lib/iris/analysis/geometry.py index cca5d836ec..120b6dfaa6 100644 --- a/lib/iris/analysis/geometry.py +++ b/lib/iris/analysis/geometry.py @@ -1,64 +1,49 @@ -# (C) British Crown Copyright 2010 - 2018, Met Office +# Copyright Iris contributors # -# This file is part of Iris. -# -# Iris is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the -# Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. 
-# -# Iris is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with Iris. If not, see . -""" -Various utilities related to geometric operations. +# This file is part of Iris and is released under the BSD license. +# See LICENSE in the root of the repository for full licensing details. +"""Various utilities related to geometric operations. .. note:: This module requires :mod:`shapely`. """ -from __future__ import (absolute_import, division, print_function) -from six.moves import (filter, input, map, range, zip) # noqa - import warnings -from shapely.geometry import Polygon - import numpy as np +from shapely.geometry import Polygon import iris.exceptions +import iris.warnings def _extract_relevant_cube_slice(cube, geometry): - """ - Given a shapely geometry object, this helper method returns - the tuple + """Calculate geometry intersection with spatial region defined by cube. + + This helper method returns the tuple (subcube, x_coord_of_subcube, y_coord_of_subcube, - (min_x_index, min_y_index, max_x_index, max_y_index)) + (min_x_index, min_y_index, max_x_index, max_y_index)). If cube and geometry don't overlap, returns None. """ - # Validate the input parameters - if not cube.coords(axis='x') or not cube.coords(axis='y'): - raise ValueError('The cube must contain x and y axes.') + if not cube.coords(axis="x") or not cube.coords(axis="y"): + raise ValueError("The cube must contain x and y axes.") - x_coords = cube.coords(axis='x') - y_coords = cube.coords(axis='y') + x_coords = cube.coords(axis="x") + y_coords = cube.coords(axis="y") if len(x_coords) != 1 or len(y_coords) != 1: - raise ValueError('The cube must contain one, and only one, coordinate ' - 'for each of the x and y axes.') + raise ValueError( + "The cube must contain one, and only one, coordinate " + "for each of the x and y axes." + ) x_coord = x_coords[0] y_coord = y_coords[0] if not (x_coord.has_bounds() and y_coord.has_bounds()): - raise ValueError('Both horizontal coordinates must have bounds.') + raise ValueError("Both horizontal coordinates must have bounds.") if x_coord.ndim != 1: raise iris.exceptions.CoordinateMultiDimError(x_coord) @@ -70,8 +55,8 @@ def _extract_relevant_cube_slice(cube, geometry): y_bounds = y_coord.bounds # identify ascending/descending coordinate dimensions - x_ascending = x_coord.points[1] - x_coord.points[0] > 0. - y_ascending = y_coord.points[1] - y_coord.points[0] > 0. 
+ x_ascending = x_coord.points[1] - x_coord.points[0] > 0.0 + y_ascending = y_coord.points[1] - y_coord.points[0] > 0.0 # identify upper/lower bounds of coordinate dimensions x_bounds_lower = x_bounds[:, 0] if x_ascending else x_bounds[:, 1] @@ -85,32 +70,40 @@ def _extract_relevant_cube_slice(cube, geometry): x_min_ix = np.where(x_bounds_lower <= x_min_geom)[0] x_min_ix = x_min_ix[np.argmax(x_bounds_lower[x_min_ix])] except ValueError: - warnings.warn("The geometry exceeds the cube's x dimension at the " - "lower end.", UserWarning) + warnings.warn( + "The geometry exceeds the cube's x dimension at the lower end.", + category=iris.warnings.IrisGeometryExceedWarning, + ) x_min_ix = 0 if x_ascending else x_coord.points.size - 1 try: x_max_ix = np.where(x_bounds_upper >= x_max_geom)[0] x_max_ix = x_max_ix[np.argmin(x_bounds_upper[x_max_ix])] except ValueError: - warnings.warn("The geometry exceeds the cube's x dimension at the " - "upper end.", UserWarning) + warnings.warn( + "The geometry exceeds the cube's x dimension at the upper end.", + category=iris.warnings.IrisGeometryExceedWarning, + ) x_max_ix = x_coord.points.size - 1 if x_ascending else 0 try: y_min_ix = np.where(y_bounds_lower <= y_min_geom)[0] y_min_ix = y_min_ix[np.argmax(y_bounds_lower[y_min_ix])] except ValueError: - warnings.warn("The geometry exceeds the cube's y dimension at the " - "lower end.", UserWarning) + warnings.warn( + "The geometry exceeds the cube's y dimension at the lower end.", + category=iris.warnings.IrisGeometryExceedWarning, + ) y_min_ix = 0 if y_ascending else y_coord.points.size - 1 try: y_max_ix = np.where(y_bounds_upper >= y_max_geom)[0] y_max_ix = y_max_ix[np.argmin(y_bounds_upper[y_max_ix])] except ValueError: - warnings.warn("The geometry exceeds the cube's y dimension at the " - "upper end.", UserWarning) + warnings.warn( + "The geometry exceeds the cube's y dimension at the upper end.", + category=iris.warnings.IrisGeometryExceedWarning, + ) y_max_ix = y_coord.points.size - 1 if y_ascending else 0 # extract coordinate values at these indices @@ -127,22 +120,25 @@ def _extract_relevant_cube_slice(cube, geometry): bnds_ix = x_min_ix, y_min_ix, x_max_ix, y_max_ix # cut the relevant part from the original cube - coord_constr = {x_coord.name(): lambda x: x_min <= x.point <= x_max, - y_coord.name(): lambda y: y_min <= y.point <= y_max} + coord_constr = { + x_coord.name(): lambda x: x_min <= x.point <= x_max, + y_coord.name(): lambda y: y_min <= y.point <= y_max, + } constraint = iris.Constraint(coord_values=coord_constr) subcube = cube.extract(constraint) if subcube is None: return None else: - x_coord = subcube.coord(axis='x') - y_coord = subcube.coord(axis='y') + x_coord = subcube.coord(axis="x") + y_coord = subcube.coord(axis="y") return subcube, x_coord, y_coord, bnds_ix def geometry_area_weights(cube, geometry, normalize=False): - """ - Returns the array of weights corresponding to the area of overlap between + """Return the array of weights corresponding to the area of overlap. + + Return the array of weights corresponding to the area of overlap between the cells of cube's horizontal grid, and the given shapely geometry. The returned array is suitable for use with :const:`iris.analysis.MEAN`. @@ -163,23 +159,24 @@ def geometry_area_weights(cube, geometry, normalize=False): calculation might be wrong. In this case, a UserWarning will be issued. - Args: + .. note:: - * cube (:class:`iris.cube.Cube`): + This function does not maintain laziness when called; it realises data. 
+ See more at :doc:`/userguide/real_and_lazy_data`. + + Parameters + ---------- + cube : :class:`iris.cube.Cube` A Cube containing a bounded, horizontal grid definition. - * geometry (a shapely geometry instance): + geometry : shapely geometry instance The geometry of interest. To produce meaningful results this geometry must have a non-zero area. Typically a Polygon or MultiPolygon. - - Kwargs: - - * normalize: + normalize : bool, default=False Calculate each individual cell weight as the cell area overlap between the cell and the given shapely geometry divided by the total cell area. Default is False. """ - # extract smallest subcube containing geometry shape = cube.shape extraction_results = _extract_relevant_cube_slice(cube, geometry) diff --git a/lib/iris/analysis/maths.py b/lib/iris/analysis/maths.py index 7440cb1a31..bd20b26019 100644 --- a/lib/iris/analysis/maths.py +++ b/lib/iris/analysis/maths.py @@ -1,73 +1,58 @@ -# (C) British Crown Copyright 2010 - 2019, Met Office +# Copyright Iris contributors # -# This file is part of Iris. -# -# Iris is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the -# Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Iris is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with Iris. If not, see . -""" -Basic mathematical and statistical operations. - -""" - -from __future__ import (absolute_import, division, print_function) -from six.moves import (filter, input, map, range, zip) # noqa +# This file is part of Iris and is released under the BSD license. +# See LICENSE in the root of the repository for full licensing details. +"""Basic mathematical and statistical operations.""" +from functools import lru_cache import inspect import math import operator -import six import warnings import cf_units +import dask.array as da import numpy as np from numpy import ma +from iris._deprecation import warn_deprecated import iris.analysis +from iris.common import SERVICES, Resolve +from iris.common.lenient import _lenient_client +from iris.config import get_logger import iris.coords -import iris.cube import iris.exceptions import iris.util +import iris.warnings -import dask.array as da -from dask.array.core import broadcast_shapes - - -_output_dtype_cache = {} +# Configure the logger. +logger = get_logger(__name__) +@lru_cache(maxsize=128, typed=True) def _output_dtype(op, first_dtype, second_dtype=None, in_place=False): - """ + """Get the numpy dtype. + Get the numpy dtype corresponding to the result of applying a unary or binary operation to arguments of specified dtype. - Args: - - * op: + Parameters + ---------- + op : A unary or binary operator which can be applied to array-like objects. - * first_dtype: + first_dtype : The dtype of the first or only argument to the operator. - - Kwargs: - - * second_dtype: + second_dtype : optional The dtype of the second argument to the operator. - - * in_place: + in_place : bool, default=False Whether the operation is to be performed in place. - Returns: - An instance of :class:`numpy.dtype` + Returns + ------- + :class:`numpy.dtype` + Notes + ----- .. 
note:: The function always returns the dtype which would result if the @@ -80,80 +65,107 @@ def _output_dtype(op, first_dtype, second_dtype=None, in_place=False): # to failure to cast the result. result = first_dtype else: - operand_dtypes = (first_dtype, second_dtype) \ - if second_dtype is not None \ - else (first_dtype,) - key = (op, operand_dtypes) - result = _output_dtype_cache.get(key, None) - if result is None: - arrays = [np.array([1], dtype=dtype) for dtype in operand_dtypes] - result = op(*arrays).dtype - _output_dtype_cache[key] = result + operand_dtypes = ( + (first_dtype, second_dtype) if second_dtype is not None else (first_dtype,) + ) + arrays = [np.array([1], dtype=dtype) for dtype in operand_dtypes] + result = op(*arrays).dtype return result def _get_dtype(operand): - """ - Get the numpy dtype corresponding to the numeric data in the object - provided. - - Args: + """Get the numpy dtype corresponding to the numeric data in the object provided. - * operand: + Parameters + ---------- + operand : An instance of :class:`iris.cube.Cube` or :class:`iris.coords.Coord`, or a number or :class:`numpy.ndarray`. - Returns: - An instance of :class:`numpy.dtype` + Returns + ------- + :class:`numpy.dtype` """ - return np.min_scalar_type(operand) if np.isscalar(operand) \ - else operand.dtype + return np.min_scalar_type(operand) if np.isscalar(operand) else operand.dtype def abs(cube, in_place=False): - """ - Calculate the absolute values of the data in the Cube provided. - - Args: + """Calculate the absolute values of the data in the Cube provided. - * cube: + Parameters + ---------- + cube : An instance of :class:`iris.cube.Cube`. - - Kwargs: - - * in_place: + in_place : bool, default=False Whether to create a new Cube, or alter the given "cube". - Returns: - An instance of :class:`iris.cube.Cube`. + Returns + ------- + :class:`iris.cube.Cube`. + + Notes + ----- + This function maintains laziness when called; it does not realise data. + See more at :doc:`/userguide/real_and_lazy_data`. """ _assert_is_cube(cube) new_dtype = _output_dtype(np.abs, cube.dtype, in_place=in_place) op = da.absolute if cube.has_lazy_data() else np.abs - return _math_op_common(cube, op, cube.units, new_dtype, in_place=in_place) + return _math_op_common(cube, op, cube.units, new_dtype=new_dtype, in_place=in_place) def intersection_of_cubes(cube, other_cube): - """ - Return the two Cubes of intersection given two Cubes. + """Return the two Cubes of intersection given two Cubes. .. note:: The intersection of cubes function will ignore all single valued coordinates in checking the intersection. - Args: - - * cube: + Parameters + ---------- + cube : An instance of :class:`iris.cube.Cube`. - * other_cube: + other_cube : An instance of :class:`iris.cube.Cube`. - Returns: - A pair of :class:`iris.cube.Cube` instances in a tuple corresponding - to the original cubes restricted to their intersection. + Returns + ------- + A paired tuple of :class:`iris.cube.Cube` + A pair of :class:`iris.cube.Cube` instances in a tuple corresponding to + the original cubes restricted to their intersection. + + Notes + ----- + .. deprecated:: 3.2.0 + + Instead use :meth:`iris.cube.CubeList.extract_overlapping`. For example, + rather than calling + + .. code:: + + cube1, cube2 = intersection_of_cubes(cube1, cube2) + + replace with + + .. 
code:: + + cubes = CubeList([cube1, cube2]) + coords = ["latitude", "longitude"] # Replace with relevant coords + intersections = cubes.extract_overlapping(coords) + cube1, cube2 = (intersections[0], intersections[1]) + + This function maintains laziness when called; it does not realise data. + See more at :doc:`/userguide/real_and_lazy_data`. """ + wmsg = ( + "iris.analysis.maths.intersection_of_cubes has been deprecated and will " + "be removed, please use iris.cube.CubeList.extract_overlapping " + "instead. See intersection_of_cubes docstring for more information." + ) + warn_deprecated(wmsg) + # Take references of the original cubes (which will be copied when # slicing later). new_cube_self = cube @@ -165,11 +177,12 @@ def intersection_of_cubes(cube, other_cube): if coord.ndim != 1: raise iris.exceptions.CoordinateMultiDimError(coord) - coord_comp = iris.analysis.coord_comparison(cube, other_cube) + coord_comp = iris.analysis._dimensional_metadata_comparison(cube, other_cube) - if coord_comp['ungroupable_and_dimensioned']: - raise ValueError('Cubes do not share all coordinates in common, ' - 'cannot intersect.') + if coord_comp["ungroupable_and_dimensioned"]: + raise ValueError( + "Cubes do not share all coordinates in common, cannot intersect." + ) # cubes must have matching coordinates for coord in cube.coords(): @@ -186,249 +199,239 @@ def intersection_of_cubes(cube, other_cube): def _assert_is_cube(cube): - if not isinstance(cube, iris.cube.Cube): - raise TypeError('The "cube" argument must be an instance of ' - 'iris.cube.Cube.') - - -def _assert_compatible(cube, other): - """ - Checks to see if cube.data and another array can be broadcast to - the same shape. + from iris.cube import Cube - """ - try: - new_shape = broadcast_shapes(cube.shape, other.shape) - except ValueError as err: - # re-raise - raise ValueError("The array was not broadcastable to the cube's data " - "shape. The error message when " - "broadcasting:\n{}\nThe cube's shape was {} and the " - "array's shape was {}".format(err, cube.shape, - other.shape)) - - if cube.shape != new_shape: - raise ValueError("The array operation would increase the size or " - "dimensionality of the cube. The new cube's data " - "would have had to become: {}".format( - new_shape)) - - -def _assert_matching_units(cube, other, operation_name): - """ - Check that the units of the cube and the other item are the same, or if - the other does not have a unit, skip this test - """ - if cube.units != getattr(other, 'units', cube.units): - msg = 'Cannot use {!r} with differing units ({} & {})'.format( - operation_name, cube.units, other.units) - raise iris.exceptions.NotYetImplementedError(msg) + if not isinstance(cube, Cube): + raise TypeError('The "cube" argument must be an instance of ' "iris.cube.Cube.") +@_lenient_client(services=SERVICES) def add(cube, other, dim=None, in_place=False): - """ - Calculate the sum of two cubes, or the sum of a cube and a - coordinate or scalar value. - - When summing two cubes, they must both have the same coordinate - systems & data resolution. - - When adding a coordinate to a cube, they must both share the same - number of elements along a shared axis. - - Args: - - * cube: - An instance of :class:`iris.cube.Cube`. - * other: - An instance of :class:`iris.cube.Cube` or :class:`iris.coords.Coord`, - or a number or :class:`numpy.ndarray`. - - Kwargs: - - * dim: - If supplying a coord with no match on the cube, you must supply - the dimension to process. 
- * in_place: - Whether to create a new Cube, or alter the given "cube". - - Returns: - An instance of :class:`iris.cube.Cube`. + """Calculate the sum. + + Calculate the sum of two cubes, or the sum of a cube and a coordinate or + array or scalar value. + + When summing two cubes, they must both have the same coordinate systems and + data resolution. + + When adding a coordinate to a cube, they must both share the same number of + elements along a shared axis. + + Parameters + ---------- + cube : iris.cube.Cube + First operand to add. + other : iris.cube.Cube, iris.coords.Coord, number, numpy.ndarray or dask.array.Array + Second operand to add. + dim : int, optional + If `other` is a coord which does not exist on the cube, specify the + dimension to which it should be mapped. + in_place : bool, default=False + If `True`, alters the input cube. Otherwise a new cube is created. + + Returns + ------- + iris.cube.Cube + + Notes + ----- + This function maintains laziness when called; it does not realise data. + See more at :doc:`/userguide/real_and_lazy_data`. """ _assert_is_cube(cube) - new_dtype = _output_dtype(operator.add, cube.dtype, _get_dtype(other), - in_place=in_place) + new_dtype = _output_dtype( + operator.add, + cube.dtype, + second_dtype=_get_dtype(other), + in_place=in_place, + ) if in_place: - _inplace_common_checks(cube, other, 'addition') + _inplace_common_checks(cube, other, "addition") op = operator.iadd else: op = operator.add - return _add_subtract_common(op, 'add', cube, other, new_dtype, dim=dim, - in_place=in_place) + return _add_subtract_common( + op, "add", cube, other, new_dtype, dim=dim, in_place=in_place + ) +@_lenient_client(services=SERVICES) def subtract(cube, other, dim=None, in_place=False): - """ - Calculate the difference between two cubes, or the difference between - a cube and a coordinate or scalar value. - - When subtracting two cubes, they must both have the same coordinate - systems & data resolution. + """Calculate the difference. - When subtracting a coordinate to a cube, they must both share the - same number of elements along a shared axis. - - Args: + Calculate the difference between two cubes, or the difference between + a cube and a coordinate or array or scalar value. - * cube: - An instance of :class:`iris.cube.Cube`. - * other: - An instance of :class:`iris.cube.Cube` or :class:`iris.coords.Coord`, - or a number or :class:`numpy.ndarray`. + When differencing two cubes, they must both have the same coordinate systems + and data resolution. - Kwargs: + When subtracting a coordinate from a cube, they must both share the same + number of elements along a shared axis. - * dim: - If supplying a coord with no match on the cube, you must supply - the dimension to process. - * in_place: - Whether to create a new Cube, or alter the given "cube". - - Returns: - An instance of :class:`iris.cube.Cube`. + Parameters + ---------- + cube : iris.cube.Cube + Cube from which to subtract. + other : iris.cube.Cube, iris.coords.Coord, number, numpy.ndarray or dask.array.Array + Object to subtract from the cube. + dim : int, optional + If `other` is a coord which does not exist on the cube, specify the + dimension to which it should be mapped. + in_place : bool, default=False + If `True`, alters the input cube. Otherwise a new cube is created. + + Returns + ------- + iris.cube.Cube + + Notes + ----- + This function maintains laziness when called; it does not realise data. + See more at :doc:`/userguide/real_and_lazy_data`. 
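For example (an editor's sketch, not part of the change; it assumes the optional iris-sample-data package)::

    import iris
    from iris.analysis.maths import add, subtract

    cube = iris.load_cube(iris.sample_data_path("air_temp.pp"))  # units: K
    warmed = add(cube, 1.5)        # a new cube; `cube` is untouched
    anomaly = subtract(cube, 280.0)
    print(warmed.units, anomaly.units)  # K K : plain numbers carry no units
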
""" _assert_is_cube(cube) - new_dtype = _output_dtype(operator.sub, cube.dtype, _get_dtype(other), - in_place=in_place) + new_dtype = _output_dtype( + operator.sub, + cube.dtype, + second_dtype=_get_dtype(other), + in_place=in_place, + ) if in_place: - _inplace_common_checks(cube, other, 'subtraction') + _inplace_common_checks(cube, other, "subtraction") op = operator.isub else: op = operator.sub - return _add_subtract_common(op, 'subtract', cube, other, new_dtype, - dim=dim, in_place=in_place) - - -def _add_subtract_common(operation_function, operation_name, cube, other, - new_dtype, dim=None, in_place=False): - """ - Function which shares common code between addition and subtraction - of cubes. - - operation_function - function which does the operation - (e.g. numpy.subtract) - operation_name - the public name of the operation (e.g. 'divide') - cube - the cube whose data is used as the first argument - to `operation_function` - other - the cube, coord, ndarray or number whose data is - used as the second argument - new_dtype - the expected dtype of the output. Used in the - case of scalar masked arrays - dim - dimension along which to apply `other` if it's a - coordinate that is not found in `cube` - in_place - whether or not to apply the operation in place to - `cube` and `cube.data` + return _add_subtract_common( + op, "subtract", cube, other, new_dtype, dim=dim, in_place=in_place + ) + + +def _add_subtract_common( + operation_function, + operation_name, + cube, + other, + new_dtype, + dim=None, + in_place=False, +): + """Share common code between addition and subtraction of cubes. + + Parameters + ---------- + operation_function : + Function which does the operation (e.g. numpy.subtract). + operation_name : + The public name of the operation (e.g. 'divide'). + cube : + The cube whose data is used as the first argument to `operation_function`. + other : + The cube, coord, ndarray, dask array or number whose + data is used as the second argument. + new_dtype : + The expected dtype of the output. Used in the case of scalar + masked arrays. + dim : optional + Dimension along which to apply `other` if it's a coordinate that is not + found in `cube`. + in_place : bool, default=False + Whether or not to apply the operation in place to `cube` and `cube.data`. """ _assert_is_cube(cube) - _assert_matching_units(cube, other, operation_name) - - if isinstance(other, iris.cube.Cube): - # get a coordinate comparison of this cube and the cube to do the - # operation with - coord_comp = iris.analysis.coord_comparison(cube, other) - - bad_coord_grps = (coord_comp['ungroupable_and_dimensioned'] + - coord_comp['resamplable']) - if bad_coord_grps: - raise ValueError('This operation cannot be performed as there are ' - 'differing coordinates (%s) remaining ' - 'which cannot be ignored.' 
-                         % ', '.join({coord_grp.name() for coord_grp
-                                      in bad_coord_grps}))
-    else:
-        coord_comp = None
-
-    new_cube = _binary_op_common(operation_function, operation_name, cube,
-                                 other, cube.units, new_dtype=new_dtype,
-                                 dim=dim, in_place=in_place)
-    if coord_comp:
-        # If a coordinate is to be ignored - remove it
-        ignore = filter(None, [coord_grp[0] for coord_grp
-                               in coord_comp['ignorable']])
-        for coord in ignore:
-            new_cube.remove_coord(coord)
+    if cube.units != getattr(other, "units", cube.units):
+        emsg = (
+            f"Cannot use {operation_name!r} with differing units "
+            f"({cube.units} & {other.units})"
+        )
+        raise iris.exceptions.NotYetImplementedError(emsg)
+
+    result = _binary_op_common(
+        operation_function,
+        operation_name,
+        cube,
+        other,
+        cube.units,
+        new_dtype=new_dtype,
+        dim=dim,
+        in_place=in_place,
+    )

-    return new_cube
+    return result


+@_lenient_client(services=SERVICES)
 def multiply(cube, other, dim=None, in_place=False):
-    """
-    Calculate the product of a cube and another cube or coordinate.
-
-    Args:
-
-    * cube:
-        An instance of :class:`iris.cube.Cube`.
-    * other:
-        An instance of :class:`iris.cube.Cube` or :class:`iris.coords.Coord`,
-        or a number or :class:`numpy.ndarray`.
-
-    Kwargs:
-
-    * dim:
-        If supplying a coord with no match on the cube, you must supply
-        the dimension to process.
-
-    Returns:
-        An instance of :class:`iris.cube.Cube`.
-
+    """Calculate the product.
+
+    Calculate the product of two cubes, or the product of a cube and a coordinate
+    or array or scalar value.
+
+    When multiplying two cubes, they must both have the same coordinate systems
+    and data resolution.
+
+    When multiplying a cube by a coordinate, they must both share the same number
+    of elements along a shared axis.
+
+    Parameters
+    ----------
+    cube : iris.cube.Cube
+        First operand to multiply.
+    other : iris.cube.Cube, iris.coords.Coord, number, numpy.ndarray or dask.array.Array
+        Second operand to multiply.
+    dim : int, optional
+        If `other` is a coord which does not exist on the cube, specify the
+        dimension to which it should be mapped.
+    in_place : bool, default=False
+        If `True`, alters the input cube. Otherwise a new cube is created.
+
+    Returns
+    -------
+    iris.cube.Cube
+
+    Notes
+    -----
+    This function maintains laziness when called; it does not realise data.
+    See more at :doc:`/userguide/real_and_lazy_data`.
     """
     _assert_is_cube(cube)
-    new_dtype = _output_dtype(operator.mul, cube.dtype, _get_dtype(other),
-                              in_place=in_place)
-    other_unit = getattr(other, 'units', '1')
+
+    new_dtype = _output_dtype(
+        operator.mul,
+        cube.dtype,
+        second_dtype=_get_dtype(other),
+        in_place=in_place,
+    )
+    other_unit = getattr(other, "units", "1")
     new_unit = cube.units * other_unit
+
     if in_place:
-        _inplace_common_checks(cube, other, 'multiplication')
+        _inplace_common_checks(cube, other, "multiplication")
         op = operator.imul
     else:
         op = operator.mul
-    if isinstance(other, iris.cube.Cube):
-        # get a coordinate comparison of this cube and the cube to do the
-        # operation with
-        coord_comp = iris.analysis.coord_comparison(cube, other)
-        bad_coord_grps = (coord_comp['ungroupable_and_dimensioned'] +
-                          coord_comp['resamplable'])
-        if bad_coord_grps:
-            raise ValueError('This operation cannot be performed as there are '
-                             'differing coordinates (%s) remaining '
-                             'which cannot be ignored.'
- % ', '.join({coord_grp.name() for coord_grp - in bad_coord_grps})) - else: - coord_comp = None - - new_cube = _binary_op_common(op, 'multiply', cube, other, new_unit, - new_dtype=new_dtype, dim=dim, - in_place=in_place) + result = _binary_op_common( + op, + "multiply", + cube, + other, + new_unit, + new_dtype=new_dtype, + dim=dim, + in_place=in_place, + ) - if coord_comp: - # If a coordinate is to be ignored - remove it - ignore = filter(None, [coord_grp[0] for coord_grp - in coord_comp['ignorable']]) - for coord in ignore: - new_cube.remove_coord(coord) - - return new_cube + return result def _inplace_common_checks(cube, other, math_op): - """ + """Check if an inplace math operation can take place. + Check whether an inplace math operation can take place between `cube` and `other`. It cannot if `cube` has integer data and `other` has float data as the operation will always produce float data that cannot be 'safely' @@ -436,88 +439,96 @@ def _inplace_common_checks(cube, other, math_op): """ other_dtype = _get_dtype(other) - if not np.can_cast(other_dtype, cube.dtype, 'same_kind'): - aemsg = ('Cannot perform inplace {} between {!r} ' - 'with {} data and {!r} with {} data.') - raise ArithmeticError( - aemsg.format(math_op, cube, cube.dtype, other, other_dtype)) + if not np.can_cast(other_dtype, cube.dtype, "same_kind"): + aemsg = ( + "Cannot perform inplace {} between {!r} " + "with {} data and {!r} with {} data." + ) + raise ArithmeticError( + aemsg.format(math_op, cube, cube.dtype, other, other_dtype) + ) +@_lenient_client(services=SERVICES) def divide(cube, other, dim=None, in_place=False): - """ - Calculate the division of a cube by a cube or coordinate. - - Args: - - * cube: - An instance of :class:`iris.cube.Cube`. - * other: - An instance of :class:`iris.cube.Cube` or :class:`iris.coords.Coord`, - or a number or :class:`numpy.ndarray`. - - Kwargs: - - * dim: - If supplying a coord with no match on the cube, you must supply - the dimension to process. - - Returns: - An instance of :class:`iris.cube.Cube`. + """Calculate the ratio. + + Calculate the ratio of two cubes, or the ratio of a cube and a coordinate + or array or scalar value. + + When dividing a cube by another cube, they must both have the same coordinate + systems and data resolution. + + When dividing a cube by a coordinate, they must both share the same number + of elements along a shared axis. + + Parameters + ---------- + cube : iris.cube.Cube + Numerator. + other : iris.cube.Cube, iris.coords.Coord, number, numpy.ndarray or dask.array.Array + Denominator. + dim : int, optional + If `other` is a coord which does not exist on the cube, specify the + dimension to which it should be mapped. + in_place : bool, default=False + If `True`, alters the input cube. Otherwise a new cube is created. + + Returns + ------- + iris.cube.Cube + + Notes + ----- + This function maintains laziness when called; it does not realise data. + See more at :doc:`/userguide/real_and_lazy_data`. """ _assert_is_cube(cube) - new_dtype = _output_dtype(operator.truediv, - cube.dtype, _get_dtype(other), in_place=in_place) - other_unit = getattr(other, 'units', '1') + + new_dtype = _output_dtype( + operator.truediv, + cube.dtype, + second_dtype=_get_dtype(other), + in_place=in_place, + ) + other_unit = getattr(other, "units", "1") new_unit = cube.units / other_unit + if in_place: - if cube.dtype.kind in 'iu': + if cube.dtype.kind in "iu": # Cannot coerce float result from inplace division back to int. 
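# --- Editor's sketch (illustrative, not part of the original diff) ---------
# The restriction noted just below: true division always yields floats, so
# an integer cube cannot take the result in place; out-of-place works.
import numpy as np

import iris.analysis.maths as maths
import iris.cube

counts = iris.cube.Cube(np.arange(6, dtype=np.int32), units="m")
try:
    maths.divide(counts, 2, in_place=True)  # raises ArithmeticError
except ArithmeticError:
    halves = maths.divide(counts, 2)  # float data; units stay "m" (m / 1)
# ----------------------------------------------------------------------------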
- aemsg = ('Cannot perform inplace division of cube {!r} ' - 'with integer data.') - raise ArithmeticError(aemsg) + emsg = ( + f"Cannot perform inplace division of cube {cube.name()!r} " + "with integer data." + ) + raise ArithmeticError(emsg) op = operator.itruediv else: op = operator.truediv - if isinstance(other, iris.cube.Cube): - # get a coordinate comparison of this cube and the cube to do the - # operation with - coord_comp = iris.analysis.coord_comparison(cube, other) - bad_coord_grps = (coord_comp['ungroupable_and_dimensioned'] + - coord_comp['resamplable']) - if bad_coord_grps: - raise ValueError('This operation cannot be performed as there are ' - 'differing coordinates (%s) remaining ' - 'which cannot be ignored.' - % ', '.join({coord_grp.name() for coord_grp - in bad_coord_grps})) - else: - coord_comp = None - - new_cube = _binary_op_common(op, 'divide', cube, other, new_unit, - new_dtype=new_dtype, dim=dim, - in_place=in_place) + result = _binary_op_common( + op, + "divide", + cube, + other, + new_unit, + new_dtype=new_dtype, + dim=dim, + in_place=in_place, + ) - if coord_comp: - # If a coordinate is to be ignored - remove it - ignore = filter(None, [coord_grp[0] for coord_grp - in coord_comp['ignorable']]) - for coord in ignore: - new_cube.remove_coord(coord) - - return new_cube + return result def exponentiate(cube, exponent, in_place=False): - """ - Returns the result of the given cube to the power of a scalar. + """Return the result of the given cube to the power of a scalar. - Args: - - * cube: + Parameters + ---------- + cube : An instance of :class:`iris.cube.Cube`. - * exponent: + exponent : The integer or floating point exponent. .. note:: When applied to the cube's unit, the exponent must @@ -525,139 +536,168 @@ def exponentiate(cube, exponent, in_place=False): powers of the basic units. e.g. Unit('meter^-2 kilogram second^-1') - - Kwargs: - - * in_place: + in_place : bool, default=False Whether to create a new Cube, or alter the given "cube". - Returns: - An instance of :class:`iris.cube.Cube`. + Returns + ------- + :class:`iris.cube.Cube`. + + Notes + ----- + This function maintains laziness when called; it does not realise data. + See more at :doc:`/userguide/real_and_lazy_data`. """ _assert_is_cube(cube) - new_dtype = _output_dtype(operator.pow, cube.dtype, _get_dtype(exponent), - in_place=in_place) + new_dtype = _output_dtype( + operator.pow, + cube.dtype, + second_dtype=_get_dtype(exponent), + in_place=in_place, + ) if cube.has_lazy_data(): - def power(data): + + def power(data, out=None): return operator.pow(data, exponent) + else: + def power(data, out=None): return np.power(data, exponent, out) - return _math_op_common(cube, power, cube.units ** exponent, new_dtype, - in_place=in_place) + return _math_op_common( + cube, + power, + cube.units**exponent, + new_dtype=new_dtype, + in_place=in_place, + ) def exp(cube, in_place=False): - """ - Calculate the exponential (exp(x)) of the cube. - - Args: + """Calculate the exponential (exp(x)) of the cube. - * cube: + Parameters + ---------- + cube : An instance of :class:`iris.cube.Cube`. + in_place : bool, default=False + Whether to create a new Cube, or alter the given "cube". - .. note:: - - Taking an exponential will return a cube with dimensionless units. + Returns + ------- + :class:`iris.cube.Cube`. - Kwargs: + Notes + ----- + Taking an exponential will return a cube with dimensionless units. - * in_place: - Whether to create a new Cube, or alter the given "cube". 
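    For example (editor's sketch, not part of the original diff)::

        import numpy as np

        import iris.analysis.maths as maths
        import iris.cube

        length = iris.cube.Cube(np.array([1.0, 2.0, 3.0]), units="m")
        area = maths.exponentiate(length, 2)  # data squared; units become m^2

        ratio = iris.cube.Cube(np.array([0.0, 1.0]), units="1")
        grown = maths.exp(ratio)              # dimensionless result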
-
-    Returns:
-        An instance of :class:`iris.cube.Cube`.
+
+    This function maintains laziness when called; it does not realise data.
+    See more at :doc:`/userguide/real_and_lazy_data`.

    """
    _assert_is_cube(cube)
    new_dtype = _output_dtype(np.exp, cube.dtype, in_place=in_place)
    op = da.exp if cube.has_lazy_data() else np.exp
-    return _math_op_common(cube, op, cf_units.Unit('1'), new_dtype,
-                           in_place=in_place)
+    return _math_op_common(
+        cube, op, cf_units.Unit("1"), new_dtype=new_dtype, in_place=in_place
+    )


 def log(cube, in_place=False):
-    """
-    Calculate the natural logarithm (base-e logarithm) of the cube.
+    """Calculate the natural logarithm (base-e logarithm) of the cube.

-    Args:
-
-    * cube:
+    Parameters
+    ----------
+    cube :
        An instance of :class:`iris.cube.Cube`.
-
-    Kwargs:
-
-    * in_place:
+    in_place : bool, default=False
        Whether to create a new Cube, or alter the given "cube".

-    Returns:
-        An instance of :class:`iris.cube.Cube`.
+    Returns
+    -------
+    :class:`iris.cube.Cube`
+
+    Notes
+    -----
+    This function maintains laziness when called; it does not realise data.
+    See more at :doc:`/userguide/real_and_lazy_data`.

    """
    _assert_is_cube(cube)
    new_dtype = _output_dtype(np.log, cube.dtype, in_place=in_place)
    op = da.log if cube.has_lazy_data() else np.log
-    return _math_op_common(cube, op, cube.units.log(math.e), new_dtype,
-                           in_place=in_place)
+    return _math_op_common(
+        cube,
+        op,
+        cube.units.log(math.e),
+        new_dtype=new_dtype,
+        in_place=in_place,
+    )


 def log2(cube, in_place=False):
-    """
-    Calculate the base-2 logarithm of the cube.
-
-    Args:
+    """Calculate the base-2 logarithm of the cube.

-    * cube:
+    Parameters
+    ----------
+    cube :
        An instance of :class:`iris.cube.Cube`.
-
-    Kwargs:
-
-    * in_place:
+    in_place : bool, default=False
        Whether to create a new Cube, or alter the given "cube".

-    Returns:
-        An instance of :class:`iris.cube.Cube`.
+    Returns
+    -------
+    :class:`iris.cube.Cube`
+
+    Notes
+    -----
+    This function maintains laziness when called; it does not realise data.
+    See more at :doc:`/userguide/real_and_lazy_data`.

    """
    _assert_is_cube(cube)
    new_dtype = _output_dtype(np.log2, cube.dtype, in_place=in_place)
    op = da.log2 if cube.has_lazy_data() else np.log2
-    return _math_op_common(cube, op, cube.units.log(2), new_dtype,
-                           in_place=in_place)
+    return _math_op_common(
+        cube, op, cube.units.log(2), new_dtype=new_dtype, in_place=in_place
+    )


 def log10(cube, in_place=False):
-    """
-    Calculate the base-10 logarithm of the cube.
-
-    Args:
+    """Calculate the base-10 logarithm of the cube.

-    * cube:
+    Parameters
+    ----------
+    cube :
        An instance of :class:`iris.cube.Cube`.
-
-    Kwargs:
-
-    * in_place:
+    in_place : bool, default=False
        Whether to create a new Cube, or alter the given "cube".

-    Returns:
-        An instance of :class:`iris.cube.Cube`.
+    Returns
+    -------
+    :class:`iris.cube.Cube`.
+
+    Notes
+    -----
+    This function maintains laziness when called; it does not realise data.
+    See more at :doc:`/userguide/real_and_lazy_data`.
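    For example (editor's sketch, not part of the original diff)::

        import numpy as np

        import iris.analysis.maths as maths
        import iris.cube

        c = iris.cube.Cube(np.array([1.0, 10.0, 100.0]), units="1")
        lg = maths.log10(c)  # data -> [0., 1., 2.]; units -> Unit("1").log(10)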
""" _assert_is_cube(cube) new_dtype = _output_dtype(np.log10, cube.dtype, in_place=in_place) op = da.log10 if cube.has_lazy_data() else np.log10 - return _math_op_common(cube, op, cube.units.log(10), new_dtype, - in_place=in_place) + return _math_op_common( + cube, op, cube.units.log(10), new_dtype=new_dtype, in_place=in_place + ) -def apply_ufunc(ufunc, cube, other_cube=None, new_unit=None, new_name=None, - in_place=False): - """ +def apply_ufunc(ufunc, cube, other=None, new_unit=None, new_name=None, in_place=False): + """Apply a `numpy universal function `_ to a cube. + Apply a `numpy universal function - `_ to a cube + `_ to a cube or pair of cubes. .. note:: Many of the numpy.ufunc have been implemented explicitly in Iris @@ -666,126 +706,214 @@ def apply_ufunc(ufunc, cube, other_cube=None, new_unit=None, new_name=None, It is usually preferable to use these functions rather than :func:`iris.analysis.maths.apply_ufunc` where possible. - Args: - - * ufunc: + Parameters + ---------- + ufunc : An instance of :func:`numpy.ufunc` e.g. :func:`numpy.sin`, :func:`numpy.mod`. - - * cube: + cube : An instance of :class:`iris.cube.Cube`. - - Kwargs: - - * other_cube: + other : :class:`iris.cube.Cube`, optional An instance of :class:`iris.cube.Cube` to be given as the second argument to :func:`numpy.ufunc`. - - * new_unit: + new_unit : optional Unit for the resulting Cube. - - * new_name: + new_name : optional Name for the resulting Cube. - - * in_place: + in_place : bool, default=False Whether to create a new Cube, or alter the given "cube". - Returns: - An instance of :class:`iris.cube.Cube`. + Returns + ------- + :class:`iris.cube.Cube`. - Example:: + Examples + -------- + :: cube = apply_ufunc(numpy.sin, cube, in_place=True) - """ + .. note:: + + This function maintains laziness when called; it does not realise data. This is dependent on `ufunc` argument + being a numpy operation that is compatible with lazy operation. + See more at :doc:`/userguide/real_and_lazy_data`. + """ if not isinstance(ufunc, np.ufunc): - name = getattr(ufunc, '__name__', 'function passed to apply_ufunc') + ufunc_name = getattr(ufunc, "__name__", "function passed to apply_ufunc") + emsg = f"{ufunc_name} is not recognised, it is not an instance of numpy.ufunc" + raise TypeError(emsg) - raise TypeError('{} is not recognised (it is not an instance of ' - 'numpy.ufunc)'.format(name)) + ufunc_name = ufunc.__name__ if ufunc.nout != 1: - raise ValueError('{} returns {} objects, apply_ufunc currently ' - 'only supports ufunc functions returning a single ' - 'object.'.format(ufunc.__name__, ufunc.nout)) - - if ufunc.nin == 2: - if other_cube is None: - raise ValueError('{} requires two arguments, so other_cube ' - 'must also be passed to apply_ufunc'.format( - ufunc.__name__)) + emsg = ( + f"{ufunc_name} returns {ufunc.nout} objects, apply_ufunc currently " + "only supports numpy.ufunc functions returning a single object." 
+ ) + raise ValueError(emsg) + + if ufunc.nin == 1: + if other is not None: + dmsg = ( + "ignoring surplus 'other' argument to apply_ufunc, " + f"provided ufunc {ufunc_name!r} only requires 1 input" + ) + logger.debug(dmsg) - _assert_is_cube(other_cube) - new_dtype = _output_dtype(ufunc, cube.dtype, other_cube.dtype, - in_place=in_place) - - new_cube = _binary_op_common(ufunc, ufunc.__name__, cube, other_cube, - new_unit, new_dtype=new_dtype, - in_place=in_place) - - elif ufunc.nin == 1: new_dtype = _output_dtype(ufunc, cube.dtype, in_place=in_place) - new_cube = _math_op_common(cube, ufunc, new_unit, new_dtype, - in_place=in_place) - + new_cube = _math_op_common( + cube, ufunc, new_unit, new_dtype=new_dtype, in_place=in_place + ) + elif ufunc.nin == 2: + if other is None: + emsg = ( + f"{ufunc_name} requires two arguments, another cube " + "must also be passed to apply_ufunc." + ) + raise ValueError(emsg) + + _assert_is_cube(other) + new_dtype = _output_dtype( + ufunc, cube.dtype, second_dtype=other.dtype, in_place=in_place + ) + + new_cube = _binary_op_common( + ufunc, + ufunc_name, + cube, + other, + new_unit, + new_dtype=new_dtype, + in_place=in_place, + ) else: - raise ValueError(ufunc.__name__ + ".nin should be 1 or 2.") + emsg = f"Provided ufunc '{ufunc_name}.nin' must be 1 or 2." + raise ValueError(emsg) new_cube.rename(new_name) return new_cube -def _binary_op_common(operation_function, operation_name, cube, other, - new_unit, new_dtype=None, dim=None, in_place=False): - """ - Function which shares common code between binary operations. - - operation_function - function which does the operation - (e.g. numpy.divide) - operation_name - the public name of the operation (e.g. 'divide') - cube - the cube whose data is used as the first argument - to `operation_function` - other - the cube, coord, ndarray or number whose data is - used as the second argument - new_dtype - the expected dtype of the output. Used in the - case of scalar masked arrays - new_unit - unit for the resulting quantity - dim - dimension along which to apply `other` if it's a - coordinate that is not found in `cube` - in_place - whether or not to apply the operation in place to - `cube` and `cube.data` +def _binary_op_common( + operation_function, + operation_name, + cube, + other, + new_unit, + new_dtype=None, + dim=None, + in_place=False, + sanitise_metadata=True, +): + """Share common code between binary operations. + + Parameters + ---------- + operation_function : + Function which does the operation (e.g. numpy.divide). + operation_name : + The public name of the operation (e.g. 'divide'). + cube : + The cube whose data is used as the first argument to `operation_function`. + other : + The cube, coord, ndarray, dask array or number whose data is used + as the second argument. + new_unit : optional + Unit for the resulting quantity. + new_dtype : + The expected dtype of the output. Used in the case of scalar masked arrays. + dim : optional + Dimension along which to apply `other` if it's a coordinate that is + not found in `cube`. + in_place : bool, default=False + Whether or not to apply the operation in place to `cube` and `cube.data`. + sanitise_metadata : bool, default=True + Whether or not to remove metadata using _sanitise_metadata function. + """ + from iris.cube import Cube + _assert_is_cube(cube) + + # Flag to notify the _math_op_common function to simply wrap the resultant + # data of the maths operation in a cube with no metadata. 
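# --- Editor's sketch (illustrative, not part of the original diff) ---------
# apply_ufunc() with a unary ufunc, matching the reworked nin handling above;
# for nin == 1 a surplus `other` operand is now logged (debug) and ignored.
import numpy as np

import iris.cube
from iris.analysis.maths import apply_ufunc

angles = iris.cube.Cube(np.linspace(0.0, np.pi, 5), units="radian")
sines = apply_ufunc(np.sin, angles, new_unit="1", new_name="sine_of_angle")
# ----------------------------------------------------------------------------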
+ skeleton_cube = False + if isinstance(other, iris.coords.Coord): - other = _broadcast_cube_coord_data(cube, other, operation_name, dim) - elif isinstance(other, iris.cube.Cube): - try: - broadcast_shapes(cube.shape, other.shape) - except ValueError: - other = iris.util.as_compatible_shape(other, cube) - other = other.core_data() + # The rhs must be an array. + rhs = _broadcast_cube_coord_data(cube, other, operation_name, dim=dim) + elif isinstance(other, Cube): + # Prepare to resolve the cube operands and associated coordinate + # metadata into the resultant cube. + resolver = Resolve(cube, other) + + # Get the broadcast, auto-transposed safe versions of the cube operands. + cube = resolver.lhs_cube_resolved + other = resolver.rhs_cube_resolved + + # Flag that it's safe to wrap the resultant data of the math operation + # in a cube with no metadata, as all of the metadata of the resultant + # cube is being managed by the resolver. + skeleton_cube = True + + # The rhs must be an array. + rhs = other.core_data() else: - other = np.asanyarray(other) - - # don't worry about checking for other data types (such as scalars or - # np.ndarrays) because _assert_compatible validates that they are broadcast - # compatible with cube.data - _assert_compatible(cube, other) + # The rhs must be an array. + if iris._lazy_data.is_lazy_data(other): + rhs = other + else: + rhs = np.asanyarray(other) - def unary_func(x): - ret = operation_function(x, other) - if ret is NotImplemented: - # explicitly raise the TypeError, so it gets raised even if, for + def unary_func(lhs): + data = operation_function(lhs, rhs) + if data is NotImplemented: + # Explicitly raise the TypeError, so it gets raised even if, for # example, `iris.analysis.maths.multiply(cube, other)` is called - # directly instead of `cube * other` - raise TypeError('cannot %s %r and %r objects' % - (operation_function.__name__, type(x).__name__, - type(other).__name__)) - return ret - return _math_op_common(cube, unary_func, new_unit, new_dtype, in_place) + # directly instead of `cube * other`. + emsg = ( + f"Cannot {operation_function.__name__} {type(lhs).__name__!r} " + f"and {type(rhs).__name__} objects." + ) + raise TypeError(emsg) + return data + + if in_place and not cube.has_lazy_data(): + # In-place arithmetic doesn't work if array type of LHS is less complex + # than RHS. + if iris._lazy_data.is_lazy_data(rhs): + cube.data = cube.lazy_data() + elif ma.is_masked(rhs) and not isinstance(cube.data, ma.MaskedArray): + cube.data = ma.array(cube.data) + + elif isinstance(cube.core_data(), ma.MaskedArray) and iris._lazy_data.is_lazy_data( + rhs + ): + # Workaround for #2987. numpy#15200 discusses the general problem. + cube = cube.copy(cube.lazy_data()) + + result = _math_op_common( + cube, + unary_func, + new_unit, + new_dtype=new_dtype, + in_place=in_place, + skeleton_cube=skeleton_cube, + sanitise_metadata=sanitise_metadata, + ) + + if isinstance(other, Cube): + # Insert the resultant data from the maths operation + # within the resolved cube. + result = resolver.cube(result.core_data(), in_place=in_place) + if sanitise_metadata: + _sanitise_metadata(result, new_unit) + + return result def _broadcast_cube_coord_data(cube, other, operation_name, dim=None): @@ -803,16 +931,20 @@ def _broadcast_cube_coord_data(cube, other, operation_name, dim=None): coord_dims = cube.coord_dims(other) data_dimension = coord_dims[0] if coord_dims else None except iris.exceptions.CoordinateNotFoundError: - raise ValueError("Could not determine dimension for %s. 
" - "Use %s(cube, coord, dim=dim)" - % (operation_name, operation_name)) + raise ValueError( + "Could not determine dimension for %s. " + "Use %s(cube, coord, dim=dim)" % (operation_name, operation_name) + ) if other.ndim != 1: raise iris.exceptions.CoordinateMultiDimError(other) if other.has_bounds(): - warnings.warn('Using {!r} with a bounded coordinate is not well ' - 'defined; ignoring bounds.'.format(operation_name)) + warnings.warn( + "Using {!r} with a bounded coordinate is not well " + "defined; ignoring bounds.".format(operation_name), + category=iris.warnings.IrisIgnoringBoundsWarning, + ) points = other.points @@ -826,63 +958,106 @@ def _broadcast_cube_coord_data(cube, other, operation_name, dim=None): return points -def _math_op_common(cube, operation_function, new_unit, new_dtype=None, - in_place=False): +def _sanitise_metadata(cube, unit): + """Clear appropriate metadata from the resultant cube. + + As part of the maths metadata contract, clear the necessary or + unsupported metadata from the resultant cube of the maths operation. + + """ + # Clear the cube names. + cube.rename(None) + + # Clear the cube cell methods. + cube.cell_methods = None + + # Clear the cell measures. + for cm in cube.cell_measures(): + cube.remove_cell_measure(cm) + + # Clear the ancillary variables. + for av in cube.ancillary_variables(): + cube.remove_ancillary_variable(av) + + # Clear the STASH attribute, if present. + if "STASH" in cube.attributes: + del cube.attributes["STASH"] + + # Set the cube units. + cube.units = unit + + +def _math_op_common( + cube, + operation_function, + new_unit, + new_dtype=None, + in_place=False, + skeleton_cube=False, + sanitise_metadata=True, +): + from iris.cube import Cube + _assert_is_cube(cube) - if in_place: - new_cube = cube + if in_place and not skeleton_cube: if cube.has_lazy_data(): - new_cube.data = operation_function(cube.lazy_data()) + cube.data = operation_function(cube.lazy_data()) else: try: operation_function(cube.data, out=cube.data) except TypeError: - # Non ufunc function + # Non-ufunc function operation_function(cube.data) + new_cube = cube else: - new_cube = cube.copy(data=operation_function(cube.core_data())) - - # If the result of the operation is scalar and masked, we need to fix up - # the dtype - if new_dtype is not None \ - and not new_cube.has_lazy_data() \ - and new_cube.data.shape == () \ - and ma.is_masked(new_cube.data): + data = operation_function(cube.core_data()) + if skeleton_cube: + # Simply wrap the resultant data in a cube, as no + # cube metadata is required by the caller. + new_cube = Cube(data) + else: + new_cube = cube.copy(data) + + # If the result of the operation is scalar and masked, we need to fix-up the dtype. + if ( + new_dtype is not None + and not new_cube.has_lazy_data() + and new_cube.data.shape == () + and ma.is_masked(new_cube.data) + ): new_cube.data = ma.masked_array(0, 1, dtype=new_dtype) - iris.analysis.clear_phenomenon_identity(new_cube) - new_cube.units = new_unit - return new_cube + if sanitise_metadata: + _sanitise_metadata(new_cube, new_unit) + return new_cube -class IFunc(object): - """ - :class:`IFunc` class for functions that can be applied to an iris cube. - """ - def __init__(self, data_func, units_func): - """ - Create an ifunc from a data function and units function. - Args: +class IFunc: + """:class:`IFunc` class for functions that can be applied to an iris cube.""" - * data_func: + def __init__(self, data_func, units_func): + """Create an ifunc from a data function and units function. 
+ Parameters + ---------- + data_func : Function to be applied to one or two data arrays, which are given as positional arguments. Should return another data array, with the same shape as the first array. - - Can also have keyword arguments. - - * units_func: - - Function to calculate the unit of the resulting cube. - Should take the cube(s) as input and return + May also have keyword arguments. + units_func : + Function to calculate the units of the resulting cube. + Should take the cube/s as input and return an instance of :class:`cf_units.Unit`. - Returns: - An ifunc. + Returns + ------- + ifunc + Examples + -------- **Example usage 1** Using an existing numpy ufunc, such as numpy.sin for the data function and a simple lambda function for the units function:: @@ -913,80 +1088,101 @@ def ws_units_func(u_cube, v_cube): cs_ifunc = iris.analysis.maths.IFunc(numpy.cumsum, lambda a: a.units) cs_cube = cs_ifunc(cube, axis=1) + """ + self._data_func_name = getattr( + data_func, "__name__", "data_func argument passed to IFunc" + ) + + if not callable(data_func): + emsg = f"{self._data_func_name} is not callable." + raise TypeError(emsg) - if hasattr(data_func, 'nin'): + self._unit_func_name = getattr( + units_func, "__name__", "units_func argument passed to IFunc" + ) + + if not callable(units_func): + emsg = f"{self._unit_func_name} is not callable." + raise TypeError(emsg) + + if hasattr(data_func, "nin"): self.nin = data_func.nin else: - if six.PY2: - (args, _, _, defaults) = inspect.getargspec(data_func) - self.nin = len(args) - ( - len(defaults) if defaults is not None else 0) - else: - sig = inspect.signature(data_func) - args = [param for param in sig.parameters.values() - if (param.kind != param.KEYWORD_ONLY and - param.default is param.empty)] - self.nin = len(args) + sig = inspect.signature(data_func) + args = [ + param + for param in sig.parameters.values() + if (param.kind != param.KEYWORD_ONLY and param.default is param.empty) + ] + self.nin = len(args) if self.nin not in [1, 2]: - msg = ('{} requires {} input data arrays, the IFunc class ' - 'currently only supports functions requiring 1 or two ' - 'data arrays as input.') - raise ValueError(msg.format(data_func.__name__, self.nin)) - - if hasattr(data_func, 'nout'): + emsg = ( + f"{self._data_func_name} requires {self.nin} input data " + "arrays, the IFunc class currently only supports functions " + "requiring 1 or 2 data arrays as input." + ) + raise ValueError(emsg) + + if hasattr(data_func, "nout"): if data_func.nout != 1: - msg = ('{} returns {} objects, the IFunc class currently ' - 'only supports functions returning a single object.') - raise ValueError(msg.format(data_func.__name__, - data_func.nout)) + emsg = ( + f"{self._data_func_name} returns {data_func.nout} objects, " + "the IFunc class currently only supports functions " + "returning a single object." + ) + raise ValueError(emsg) self.data_func = data_func - self.units_func = units_func def __repr__(self): - return 'iris.analysis.maths.IFunc({}, {})'.format( - self.data_func.__name__, self.units_func.__name__) + result = ( + f"iris.analysis.maths.IFunc({self._data_func_name}, " + f"{self._unit_func_name})" + ) + return result def __str__(self): - return ('IFunc constructed from the data function {} ' - 'and the units function {}'.format( - self.data_func.__name__, self.units_func.__name__)) - - def __call__(self, cube, other=None, dim=None, in_place=False, - new_name=None, **kwargs_data_func): - """ - Applies the ifunc to the cube(s). 
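    A minimal sketch, echoing the cumulative-sum example in the class
    docstring above (editor's illustration, not part of the original diff)::

        import numpy as np

        import iris.cube
        from iris.analysis.maths import IFunc

        cs_ifunc = IFunc(np.cumsum, lambda cube: cube.units)
        cube = iris.cube.Cube(np.arange(6.0).reshape(2, 3), units="K")
        cs_cube = cs_ifunc(cube, axis=1, new_name="cumulative_sum")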
- - Args: - - * cube + result = ( + f"IFunc constructed from the data function {self._data_func_name} " + f"and the units function {self._unit_func_name}" + ) + return result + + def __call__( + self, + cube, + other=None, + dim=None, + in_place=False, + new_name=None, + **kwargs_data_func, + ): + """Apply the ifunc to the cube(s). + + Parameters + ---------- + cube : An instance of :class:`iris.cube.Cube`, whose data is used as the first argument to the data function. - - Kwargs: - - * other - A cube, coord, ndarray or number whose data is used as the + other : optional + A cube, coord, ndarray, dask array or number whose data is used as the second argument to the data function. - - * new_name: - Name for the resulting Cube. - - * in_place: - Whether to create a new Cube, or alter the given "cube". - - * dim: + dim : optional Dimension along which to apply `other` if it's a coordinate that is - not found in `cube` - - * **kwargs_data_func: + not found in `cube`. + in_place : bool, default=False + Whether to create a new Cube, or alter the given "cube". + new_name : optional + Name for the resulting Cube. + **kwargs_data_func : Keyword arguments that get passed on to the data_func. - Returns: - An instance of :class:`iris.cube.Cube`. + Returns + ------- + :class:`iris.cube.Cube` """ _assert_is_cube(cube) @@ -996,30 +1192,39 @@ def wrap_data_func(*args, **kwargs): return self.data_func(*args, **kwargs_combined) - if self.nin == 2: - if other is None: - raise ValueError(self.data_func.__name__ + - ' requires two arguments') - - new_unit = self.units_func(cube, other) - - new_cube = _binary_op_common(wrap_data_func, - self.data_func.__name__, cube, other, - new_unit, dim=dim, - in_place=in_place) - - elif self.nin == 1: + if self.nin == 1: if other is not None: - raise ValueError(self.data_func.__name__ + - ' requires one argument') + dmsg = ( + "ignoring surplus 'other' argument to IFunc.__call__, " + f"provided data_func {self._data_func_name!r} only requires " + "1 input" + ) + logger.debug(dmsg) new_unit = self.units_func(cube) - new_cube = _math_op_common(cube, wrap_data_func, new_unit, - in_place=in_place) - + new_cube = _math_op_common( + cube, wrap_data_func, new_unit, in_place=in_place + ) else: - raise ValueError('self.nin should be 1 or 2.') + if other is None: + emsg = ( + f"{self._data_func_name} requires two arguments, another " + "cube must also be passed to IFunc.__call__." + ) + raise ValueError(emsg) + + new_unit = self.units_func(cube, other) + + new_cube = _binary_op_common( + wrap_data_func, + self.data_func.__name__, + cube, + other, + new_unit, + dim=dim, + in_place=in_place, + ) if new_name is not None: new_cube.rename(new_name) diff --git a/lib/iris/analysis/stats.py b/lib/iris/analysis/stats.py index 3eda09c624..8df93571f1 100644 --- a/lib/iris/analysis/stats.py +++ b/lib/iris/analysis/stats.py @@ -1,97 +1,79 @@ -# (C) British Crown Copyright 2013 - 2019, Met Office +# Copyright Iris contributors # -# This file is part of Iris. -# -# Iris is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the -# Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Iris is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. 
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with Iris.  If not, see <http://www.gnu.org/licenses/>.
-"""
-Statistical operations between cubes.
-
-"""
-
-from __future__ import (absolute_import, division, print_function)
-from six.moves import (filter, input, map, range, zip)  # noqa
+# This file is part of Iris and is released under the BSD license.
+# See LICENSE in the root of the repository for full licensing details.
+"""Statistical operations between cubes."""

+import dask.array as da
 import numpy as np
-import numpy.ma as ma

-import iris
-from iris.util import broadcast_to_shape
-
-
-def _ones_like(cube):
-    """
-    Return a copy of cube with the same mask, but all data values set to 1.
-
-    The operation is non-lazy.
-    """
-    ones_cube = cube.copy()
-    ones_cube.data = np.ones_like(cube.data)
-    ones_cube.rename('unknown')
-    ones_cube.units = 1
-    return ones_cube
-
-
-def pearsonr(cube_a, cube_b, corr_coords=None, weights=None, mdtol=1.,
-             common_mask=False):
-    """
-    Calculate the Pearson's r correlation coefficient over specified
-    dimensions.
-
-    Args:
-
-    * cube_a, cube_b (cubes):
+import iris
+from iris.common import SERVICES, Resolve
+from iris.common.lenient import _lenient_client
+from iris.util import _mask_array
+
+
+@_lenient_client(services=SERVICES)
+def pearsonr(
+    cube_a,
+    cube_b,
+    corr_coords=None,
+    weights=None,
+    mdtol=1.0,
+    common_mask=False,
+):
+    """Calculate the Pearson's r correlation coefficient over specified dimensions.
+
+    Parameters
+    ----------
+    cube_a, cube_b : :class:`iris.cube.Cube`
        Cubes between which the correlation will be calculated.  The cubes
        should either be the same shape and have the same dimension coordinates
-        or one cube should be broadcastable to the other.
-    * corr_coords (str or list of str):
+        or one cube should be broadcastable to the other.  Broadcasting rules
+        are the same as those for cube arithmetic (see :ref:`cube maths`).
+    corr_coords : str or list of str, optional
        The cube coordinate name(s) over which to calculate correlations. If no
        names are provided then correlation will be calculated over all common
        cube dimensions.
-    * weights (numpy.ndarray, optional):
-        Weights array of same shape as (the smaller of) cube_a and cube_b. Note
-        that latitude/longitude area weights can be calculated using
+    weights : :class:`numpy.ndarray`, optional
+        Weights array of same shape as (the smaller of) `cube_a` and `cube_b`.
+        Note that latitude/longitude area weights can be calculated using
        :func:`iris.analysis.cartography.area_weights`.
-    * mdtol (float, optional):
+    mdtol : float, default=1.0
        Tolerance of missing data. The missing data fraction is calculated
-        based on the number of grid cells masked in both cube_a and cube_b. If
-        this fraction exceed mdtol, the returned value in the corresponding
-        cell is masked. mdtol=0 means no missing data is tolerated while
-        mdtol=1 means the resulting element will be masked if and only if all
-        contributing elements are masked in cube_a or cube_b. Defaults to 1.
-    * common_mask (bool):
-        If True, applies a common mask to cube_a and cube_b so only cells which
-        are unmasked in both cubes contribute to the calculation. If False, the
-        variance for each cube is calculated from all available cells. Defaults
-        to False.
-
-    Returns:
+        based on the number of grid cells masked in both `cube_a` and `cube_b`.
+        If this fraction exceeds `mdtol`, the returned value in the
+        corresponding cell is masked.
`mdtol` =0 means no missing data is + tolerated while `mdtol` =1 means the resulting element will be masked + if and only if all contributing elements are masked in `cube_a` or + `cube_b`. + common_mask : bool, default=False + If ``True``, applies a common mask to cube_a and cube_b so only cells + which are unmasked in both cubes contribute to the calculation. If + ``False``, the variance for each cube is calculated from all available + cells. + + Returns + ------- + :class:`~iris.cube.Cube` A cube of the correlation between the two input cubes along the specified dimensions, at each point in the remaining dimensions of the cubes. For example providing two time/altitude/latitude/longitude cubes and - corr_coords of 'latitude' and 'longitude' will result in a + `corr_coords` of 'latitude' and 'longitude' will result in a time/altitude cube describing the latitude/longitude (i.e. pattern) correlation at each time/altitude point. - Reference: - http://www.statsoft.com/textbook/glosp.html#Pearson%20Correlation + Notes + ----- + If either of the input cubes has lazy data, the result will have lazy data. - This operation is non-lazy. + Reference: + https://en.wikipedia.org/wiki/Pearson_correlation_coefficient """ - - # Assign larger cube to cube_1 + # Assign larger cube to cube_1 for simplicity. if cube_b.ndim > cube_a.ndim: cube_1 = cube_b cube_2 = cube_a @@ -99,72 +81,90 @@ def pearsonr(cube_a, cube_b, corr_coords=None, weights=None, mdtol=1., cube_1 = cube_a cube_2 = cube_b - dim_coords_1 = [coord.name() for coord in cube_1.dim_coords] - dim_coords_2 = [coord.name() for coord in cube_2.dim_coords] - common_dim_coords = list(set(dim_coords_1) & set(dim_coords_2)) + smaller_shape = cube_2.shape + + # Get the broadcast, auto-transposed safe versions of the cube operands. + resolver = Resolve(cube_1, cube_2) + lhs_cube_resolved = resolver.lhs_cube_resolved + rhs_cube_resolved = resolver.rhs_cube_resolved + + if lhs_cube_resolved.has_lazy_data() or rhs_cube_resolved.has_lazy_data(): + al = da + array_lhs = lhs_cube_resolved.lazy_data() + array_rhs = rhs_cube_resolved.lazy_data() + else: + al = np + array_lhs = lhs_cube_resolved.data + array_rhs = rhs_cube_resolved.data + # If no coords passed then set to all common dimcoords of cubes. if corr_coords is None: - corr_coords = common_dim_coords + dim_coords_1 = {coord.name() for coord in lhs_cube_resolved.dim_coords} + dim_coords_2 = {coord.name() for coord in rhs_cube_resolved.dim_coords} + corr_coords = list(dim_coords_1.intersection(dim_coords_2)) - smaller_shape = cube_2.shape + # Interpret coords as array dimensions. + corr_dims = set() + if isinstance(corr_coords, str): + corr_coords = [corr_coords] + for coord in corr_coords: + corr_dims.update(lhs_cube_resolved.coord_dims(coord)) + + corr_dims = tuple(corr_dims) # Match up data masks if required. if common_mask: - # Create a cube of 1's with a common mask. - if ma.is_masked(cube_2.data): - mask_cube = _ones_like(cube_2) - else: - mask_cube = 1. - if ma.is_masked(cube_1.data): - # Take a slice to avoid unnecessary broadcasting of cube_2. - slice_coords = [dim_coords_1[i] for i in range(cube_1.ndim) if - dim_coords_1[i] not in common_dim_coords and - np.array_equal(cube_1.data.mask.any(axis=i), - cube_1.data.mask.all(axis=i))] - cube_1_slice = next(cube_1.slices_over(slice_coords)) - mask_cube = _ones_like(cube_1_slice) * mask_cube - # Apply common mask to data. 
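# --- Editor's sketch (illustrative, not part of the original diff) ---------
# pearsonr() pattern correlation over two dimensions; collapsing both dims
# yields a scalar "Pearson's r" cube.
import numpy as np

import iris.analysis.stats as stats
import iris.coords
import iris.cube


def _demo_cube(data):
    # Build a small lat/lon cube for the demonstration.
    lat = iris.coords.DimCoord([0.0, 10.0], standard_name="latitude", units="degrees")
    lon = iris.coords.DimCoord(
        [0.0, 10.0, 20.0], standard_name="longitude", units="degrees"
    )
    return iris.cube.Cube(data, dim_coords_and_dims=[(lat, 0), (lon, 1)])


cube_a = _demo_cube(np.arange(6.0).reshape(2, 3))
cube_b = _demo_cube(np.arange(6.0).reshape(2, 3) ** 2)
r = stats.pearsonr(cube_a, cube_b, corr_coords=["latitude", "longitude"])
# ----------------------------------------------------------------------------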
- if isinstance(mask_cube, iris.cube.Cube): - cube_1 = cube_1 * mask_cube - cube_2 = mask_cube * cube_2 - dim_coords_2 = [coord.name() for coord in cube_2.dim_coords] - - # Broadcast weights to shape of cubes if necessary. - if weights is None or cube_1.shape == smaller_shape: - weights_1 = weights - weights_2 = weights + mask_lhs = al.ma.getmaskarray(array_lhs) + if al is np: + # Reduce all invariant dimensions of mask_lhs to length 1. This avoids + # unnecessary broadcasting of array_rhs. + index = tuple( + slice(0, 1) + if np.array_equal(mask_lhs.any(axis=dim), mask_lhs.all(axis=dim)) + else slice(None) + for dim in range(mask_lhs.ndim) + ) + mask_lhs = mask_lhs[index] + + array_rhs = _mask_array(array_rhs, mask_lhs) + array_lhs = _mask_array(array_lhs, al.ma.getmaskarray(array_rhs)) + + # Broadcast weights to shape of arrays if necessary. + if weights is None: + weights_lhs = weights_rhs = None else: if weights.shape != smaller_shape: - raise ValueError("weights array should have dimensions {}". - format(smaller_shape)) - - dims_1_common = [i for i in range(cube_1.ndim) if - dim_coords_1[i] in common_dim_coords] - weights_1 = broadcast_to_shape(weights, cube_1.shape, dims_1_common) - if cube_2.shape != smaller_shape: - dims_2_common = [i for i in range(cube_2.ndim) if - dim_coords_2[i] in common_dim_coords] - weights_2 = broadcast_to_shape(weights, cube_2.shape, - dims_2_common) - else: - weights_2 = weights + msg = f"weights array should have dimensions {smaller_shape}" + raise ValueError(msg) + + wt_resolver = Resolve(cube_1, cube_2.copy(weights)) + weights = wt_resolver.rhs_cube_resolved.data + weights_rhs = np.broadcast_to(weights, array_rhs.shape) + weights_lhs = np.broadcast_to(weights, array_lhs.shape) # Calculate correlations. - s1 = cube_1 - cube_1.collapsed(corr_coords, iris.analysis.MEAN, - weights=weights_1) - s2 = cube_2 - cube_2.collapsed(corr_coords, iris.analysis.MEAN, - weights=weights_2) - - covar = (s1*s2).collapsed(corr_coords, iris.analysis.SUM, - weights=weights_1, mdtol=mdtol) - var_1 = (s1**2).collapsed(corr_coords, iris.analysis.SUM, - weights=weights_1) - var_2 = (s2**2).collapsed(corr_coords, iris.analysis.SUM, - weights=weights_2) - - denom = iris.analysis.maths.apply_ufunc(np.sqrt, var_1*var_2, - new_unit=covar.units) + s_lhs = array_lhs - al.ma.average( + array_lhs, axis=corr_dims, weights=weights_lhs, keepdims=True + ) + s_rhs = array_rhs - al.ma.average( + array_rhs, axis=corr_dims, weights=weights_rhs, keepdims=True + ) + + s_prod = resolver.cube(s_lhs * s_rhs) + + # Use cube collapsed method as it takes care of coordinate collapsing and missing + # data tolerance. + covar = s_prod.collapsed( + corr_coords, iris.analysis.SUM, weights=weights_lhs, mdtol=mdtol + ) + + var_lhs = iris.analysis._sum(s_lhs**2, axis=corr_dims, weights=weights_lhs) + var_rhs = iris.analysis._sum(s_rhs**2, axis=corr_dims, weights=weights_rhs) + + denom = np.sqrt(var_lhs * var_rhs) + corr_cube = covar / denom corr_cube.rename("Pearson's r") + corr_cube.units = 1 return corr_cube diff --git a/lib/iris/analysis/trajectory.py b/lib/iris/analysis/trajectory.py index 1f7d01372b..2111dd2504 100644 --- a/lib/iris/analysis/trajectory.py +++ b/lib/iris/analysis/trajectory.py @@ -1,46 +1,23 @@ -# (C) British Crown Copyright 2010 - 2018, Met Office +# Copyright Iris contributors # -# This file is part of Iris. 
-# -# Iris is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the -# Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Iris is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with Iris. If not, see . -""" -Defines a Trajectory class, and a routine to extract a sub-cube along a -trajectory. - -""" - -from __future__ import (absolute_import, division, print_function) -from six.moves import (filter, input, map, range, zip) # noqa -import six +# This file is part of Iris and is released under the BSD license. +# See LICENSE in the root of the repository for full licensing details. +"""Defines a Trajectory class, and a routine to extract a sub-cube along a trajectory.""" import math import numpy as np from scipy.spatial import cKDTree -import iris.analysis -import iris.coord_systems import iris.coords -from iris.analysis import Linear -from iris.analysis._interpolation import snapshot_grid -from iris.util import _meshgrid +class _Segment: + """A single trajectory line segment. + + Two points, as described in the Trajectory class. + """ -class _Segment(object): - """A single trajectory line segment: Two points, as described in the - Trajectory class.""" def __init__(self, p0, p1): # check keys if sorted(p0.keys()) != sorted(p1.keys()): @@ -56,14 +33,22 @@ def __init__(self, p0, p1): self.length = math.sqrt(squares) -class Trajectory(object): +class Trajectory: """A series of given waypoints with pre-calculated sample points.""" def __init__(self, waypoints, sample_count=10): - """ - Defines a trajectory using a sequence of waypoints. + """Define a trajectory using a sequence of waypoints. + + Parameters + ---------- + waypoints : + A sequence of dictionaries, mapping coordinate names to values. + sample_count : int, default=10 + The number of sample positions to use along the trajectory. - For example:: + Examples + -------- + :: waypoints = [{'latitude': 45, 'longitude': -60}, {'latitude': 45, 'longitude': 0}] @@ -72,29 +57,21 @@ def __init__(self, waypoints, sample_count=10): .. note:: All the waypoint dictionaries must contain the same coordinate names. - Args: - - * waypoints - A sequence of dictionaries, mapping coordinate names to values. - - Kwargs: - - * sample_count - The number of sample positions to use along the trajectory. - """ self.waypoints = waypoints self.sample_count = sample_count # create line segments from the waypoints - segments = [_Segment(self.waypoints[i], self.waypoints[i+1]) - for i in range(len(self.waypoints) - 1)] + segments = [ + _Segment(self.waypoints[i], self.waypoints[i + 1]) + for i in range(len(self.waypoints) - 1) + ] # calculate our total length self.length = sum([seg.length for seg in segments]) # generate our sampled points - #: The trajectory points, as dictionaries of {coord_name: value}. + # The trajectory points, as dictionaries of {coord_name: value}. 
self.sampled_points = [] sample_step = self.length / (self.sample_count - 1) @@ -103,7 +80,6 @@ def __init__(self, waypoints, sample_count=10): cur_seg = segments[cur_seg_i] len_accum = cur_seg.length for p in range(self.sample_count): - # calculate the sample position along our total length sample_at_len = p * sample_step @@ -115,52 +91,60 @@ def __init__(self, waypoints, sample_count=10): # how far through the segment is our sample point? seg_start_len = len_accum - cur_seg.length - seg_frac = (sample_at_len-seg_start_len) / cur_seg.length + seg_frac = (sample_at_len - seg_start_len) / cur_seg.length # sample each coordinate in this segment, to create a new # sampled point new_sampled_point = {} for key in cur_seg.pts[0].keys(): seg_coord_delta = cur_seg.pts[1][key] - cur_seg.pts[0][key] - new_sampled_point.update({key: cur_seg.pts[0][key] + - seg_frac*seg_coord_delta}) + new_sampled_point.update( + {key: cur_seg.pts[0][key] + seg_frac * seg_coord_delta} + ) # add this new sampled point self.sampled_points.append(new_sampled_point) def __repr__(self): - return 'Trajectory(%s, sample_count=%s)' % (self.waypoints, - self.sample_count) + return "Trajectory(%s, sample_count=%s)" % ( + self.waypoints, + self.sample_count, + ) def _get_interp_points(self): - """ - Translate `self.sampled_points` to the format expected by the - interpolator. + """Translate `self.sampled_points` to the format expected by the interpolator. - Returns: + Returns + ------- + `self.sampled points` `self.sampled points` in the format required by `:func:`~iris.analysis.trajectory.interpolate`. """ - points = {k: [point_dict[k] for point_dict in self.sampled_points] - for k in self.sampled_points[0].keys()} + points = { + k: [point_dict[k] for point_dict in self.sampled_points] + for k in self.sampled_points[0].keys() + } return [(k, v) for k, v in points.items()] def _src_cube_anon_dims(self, cube): - """ + """Locate the index of anonymous dimensions. + A helper method to locate the index of anonymous dimensions on the interpolation target, ``cube``. - Returns: - The index of any anonymous dimensions in ``cube``. + Returns + ------- + The index of any anonymous dimensions in ``cube``. """ named_dims = [cube.coord_dims(c)[0] for c in cube.dim_coords] return list(set(range(cube.ndim)) - set(named_dims)) def interpolate(self, cube, method=None): - """ - Calls :func:`~iris.analysis.trajectory.interpolate` to interpolate + """Interpolate ``cube`` on the defined trajectory. + + Call :func:`~iris.analysis.trajectory.interpolate` to interpolate ``cube`` on the defined trajectory. Assumes that the coordinate names supplied in the waypoints @@ -168,14 +152,11 @@ def interpolate(self, cube, method=None): supplied in the same coord_system as in `cube`, where appropriate (i.e. for horizontal coordinate points). - Args: - - * cube + Parameters + ---------- + cube : The source Cube to interpolate. - - Kwargs: - - * method: + method : optional The interpolation method to use; "linear" (default) or "nearest". Only nearest is available when specifying multi-dimensional coordinates. @@ -188,52 +169,55 @@ def interpolate(self, cube, method=None): if len(interpolated_cube.dim_coords) < interpolated_cube.ndim: # Add a new coord `index` to describe the new dimension created by # interpolating. 
- index_coord = iris.coords.DimCoord(range(self.sample_count), - long_name='index') + index_coord = iris.coords.DimCoord( + range(self.sample_count), long_name="index" + ) # Make sure anonymous dims in `cube` do not mistakenly get labelled # as the new `index` dimension created by interpolating. src_anon_dims = self._src_cube_anon_dims(cube) interp_anon_dims = self._src_cube_anon_dims(interpolated_cube) - anon_dim_index, = list(set(interp_anon_dims) - set(src_anon_dims)) + (anon_dim_index,) = list(set(interp_anon_dims) - set(src_anon_dims)) # Add the new coord to the interpolated cube. interpolated_cube.add_dim_coord(index_coord, anon_dim_index) return interpolated_cube def interpolate(cube, sample_points, method=None): - """ - Extract a sub-cube at the given n-dimensional points. - - Args: + """Extract a sub-cube at the given n-dimensional points. - * cube + Parameters + ---------- + cube : The source Cube. - - * sample_points + sample_points : A sequence of coordinate (name) - values pairs. - - Kwargs: - - * method + method : optional Request "linear" interpolation (default) or "nearest" neighbour. Only nearest neighbour is available when specifying multi-dimensional coordinates. - - For example:: + Examples + -------- + :: sample_points = [('latitude', [45, 45, 45]), ('longitude', [-60, -50, -40])] interpolated_cube = interpolate(cube, sample_points) + Notes + ----- + This function does not maintain laziness when called; it realises data. + See more at :doc:`/userguide/real_and_lazy_data`. """ + from iris.analysis import Linear + if method not in [None, "linear", "nearest"]: raise ValueError("Unhandled interpolation specified : %s" % method) # Convert any coordinate names to coords points = [] for coord, values in sample_points: - if isinstance(coord, six.string_types): + if isinstance(coord, str): coord = cube.coord(coord) points.append((coord, values)) sample_points = points @@ -243,7 +227,7 @@ def interpolate(cube, sample_points, method=None): trajectory_size = len(values) for coord, values in sample_points[1:]: if len(values) != trajectory_size: - raise ValueError('Lengths of coordinate values are inconsistent.') + raise ValueError("Lengths of coordinate values are inconsistent.") # Which dimensions are we squishing into the last dimension? squish_my_dims = set() @@ -255,8 +239,9 @@ def interpolate(cube, sample_points, method=None): # Derive the new cube's shape by filtering out all the dimensions we're # about to sample, # and then adding a new dimension to accommodate all the sample points. 
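# --- Editor's sketch (illustrative, not part of the original diff; assumes
# the iris-sample-data package) ----------------------------------------------
# The two entry points documented above: the Trajectory class and the
# module-level interpolate().
import iris
from iris.analysis.trajectory import Trajectory, interpolate

cube = iris.load_cube(iris.sample_data_path("air_temp.pp"))

waypoints = [{"latitude": 45, "longitude": -60}, {"latitude": 45, "longitude": 0}]
section = Trajectory(waypoints, sample_count=5).interpolate(cube)

# Equivalently, sampling explicit positions without a Trajectory:
sample_points = [("latitude", [45, 45, 45]), ("longitude", [-60, -30, 0])]
section = interpolate(cube, sample_points, method="nearest")
# ----------------------------------------------------------------------------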
- remaining = [(dim, size) for dim, size in enumerate(cube.shape) if dim - not in squish_my_dims] + remaining = [ + (dim, size) for dim, size in enumerate(cube.shape) if dim not in squish_my_dims + ] new_data_shape = [size for dim, size in remaining] new_data_shape.append(trajectory_size) @@ -296,13 +281,16 @@ def interpolate(cube, sample_points, method=None): src_dims = cube.coord_dims(coord) if not squish_my_dims.isdisjoint(src_dims): points = np.array([coord.points.flatten()[0]] * trajectory_size) - new_coord = iris.coords.AuxCoord(points, - standard_name=coord.standard_name, - long_name=coord.long_name, - units=coord.units, - bounds=None, - attributes=coord.attributes, - coord_system=coord.coord_system) + new_coord = iris.coords.AuxCoord( + points, + var_name=coord.var_name, + standard_name=coord.standard_name, + long_name=coord.long_name, + units=coord.units, + bounds=None, + attributes=coord.attributes, + coord_system=coord.coord_system, + ) new_cube.add_aux_coord(new_coord, trajectory_dim) coord_mapping[id(coord)] = new_coord @@ -313,32 +301,75 @@ def interpolate(cube, sample_points, method=None): for coord, values in sample_points: if coord.ndim > 1: if method == "linear": - msg = "Cannot currently perform linear interpolation for " \ - "multi-dimensional coordinates." + msg = ( + "Cannot currently perform linear interpolation for " + "multi-dimensional coordinates." + ) raise iris.exceptions.CoordinateMultiDimError(msg) method = "nearest" break if method in ["linear", None]: - for i in range(trajectory_size): - point = [(coord, values[i]) for coord, values in sample_points] - column = cube.interpolate(point, Linear()) - new_cube.data[..., i] = column.data - # Fill in the empty squashed (non derived) coords. - for column_coord in column.dim_coords + column.aux_coords: - src_dims = cube.coord_dims(column_coord) - if not squish_my_dims.isdisjoint(src_dims): - if len(column_coord.points) != 1: - msg = "Expected to find exactly one point. Found {}." - raise Exception(msg.format(column_coord.points)) - new_cube.coord(column_coord.name()).points[i] = \ - column_coord.points[0] + # Using cube.interpolate will generate extra values that we don't need + # as it makes a grid from the provided coordinates (like a meshgrid) + # and then does interpolation for all of them. This is memory + # inefficient, but significantly more time efficient than calling + # cube.interpolate (or the underlying method on the interpolator) + # repeatedly, so using this approach for now. In future, it would be + # ideal if we only interpolated at the points we care about + columns = cube.interpolate(sample_points, Linear()) + # np.einsum(a, [0, 0], [0]) is like np.diag(a) + # We're using einsum here to do an n-dimensional diagonal, leaving the + # other dimensions unaffected and putting the diagonal's direction on + # the final axis + initial_inds = list(range(1, columns.ndim + 1)) + for ind in squish_my_dims: + initial_inds[ind] = 0 + final_inds = list(filter(lambda x: x != 0, initial_inds)) + [0] + new_cube.data = np.einsum(columns.data, initial_inds, final_inds) + + # Fill in the empty squashed (non derived) coords. 
+ # We're using the same Einstein summation plan as for the cube, but + # redoing those indices to match the indices in the coordinates + for columns_coord in columns.dim_coords + columns.aux_coords: + src_dims = cube.coord_dims(columns_coord) + if not squish_my_dims.isdisjoint(src_dims): + # Mapping the cube indices onto the coord + initial_coord_inds = [initial_inds[ind] for ind in src_dims] + # Making the final ones the same way as for the cube + # 0 will always appear in the initial ones because we know this + # coord overlaps the squish dims + final_coord_inds = list( + filter(lambda x: x != 0, initial_coord_inds) + ) + [0] + new_coord_points = np.einsum( + columns_coord.points, initial_coord_inds, final_coord_inds + ) + # Check we're not overwriting coord.points with the wrong shape + if ( + new_cube.coord(columns_coord.name()).points.shape + != new_coord_points.shape + ): + msg = ( + "Coord {} was expected to have new points of shape {}. " + "Found shape of {}." + ) + raise ValueError( + msg.format( + columns_coord.name(), + new_cube.coord(columns_coord.name()).points.shape, + new_coord_points.shape, + ) + ) + # Replace the points + new_cube.coord(columns_coord.name()).points = new_coord_points elif method == "nearest": # Use a cache with _nearest_neighbour_indices_ndcoords() cache = {} column_indexes = _nearest_neighbour_indices_ndcoords( - cube, sample_points, cache=cache) + cube, sample_points, cache=cache + ) # Construct "fancy" indexes, so we can create the result data array in # a single numpy indexing operation. @@ -349,8 +380,7 @@ def interpolate(cube, sample_points, method=None): n_index_length = len(column_indexes[0]) dims_reduced = [False] * n_index_length for i_ind in range(n_index_length): - contents = [column_index[i_ind] - for column_index in column_indexes] + contents = [column_index[i_ind] for column_index in column_indexes] each_used = [content != slice(None) for content in contents] if np.all(each_used): # This dimension is addressed : use a list of indices. @@ -369,8 +399,10 @@ def interpolate(cube, sample_points, method=None): region_slice = slice(None) else: # Should really never happen, if _ndcoords is right. - msg = ('Internal error in trajectory interpolation : point ' - 'selection indices should all have the same form.') + msg = ( + "Internal error in trajectory interpolation : point " + "selection indices should all have the same form." + ) raise ValueError(msg) fancy_source_indices.append(fancy_index)
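The einsum plan comments above are dense; as a standalone NumPy sketch of the same diagonal-extraction trick (the array names here are illustrative)::

    import numpy as np

    # Repeating an axis label selects the diagonal over those axes, so
    # np.einsum(a, [0, 0], [0]) is equivalent to np.diag(a).
    a = np.arange(9).reshape(3, 3)
    assert np.array_equal(np.einsum(a, [0, 0], [0]), np.diag(a))

    # For an n-D array, repeating the label on two axes takes their joint
    # diagonal, leaves the other axes alone, and the output plan puts the
    # diagonal direction on the final axis.
    b = np.arange(2 * 3 * 3).reshape(2, 3, 3)
    diag = np.einsum(b, [1, 0, 0], [1, 0])  # shape (2, 3)
    assert np.array_equal(diag, np.array([np.diag(b[i]) for i in range(2)]))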
- if np.ma.isMaskedArray(source_data): - # This is **not** proper mask handling, because we cannot produce a - # masked result, but it ensures we use a "filled" version of the - # input in this case. - source_data = source_data.filled() - new_cube.data[:] = source_data - # NOTE: we assign to "new_cube.data[:]" and *not* just "new_cube.data", - # because the existing code produces a default dtype from 'np.empty' - # instead of preserving the input dtype. - # TODO: maybe this should be fixed -- i.e. to preserve input dtype ?? + new_cube.data = source_data[tuple(fancy_source_indices)] # Fill in the empty squashed (non derived) coords. - column_coords = [coord - for coord in cube.dim_coords + cube.aux_coords - if not squish_my_dims.isdisjoint( - cube.coord_dims(coord))] - new_cube_coords = [new_cube.coord(column_coord.name()) - for column_coord in column_coords] + column_coords = [ + coord + for coord in cube.dim_coords + cube.aux_coords + if not squish_my_dims.isdisjoint(cube.coord_dims(coord)) + ] + new_cube_coords = [ + new_cube.coord(column_coord.name()) for column_coord in column_coords + ] all_point_indices = np.array(column_indexes) single_point_test_cube = cube[column_indexes[0]] for new_cube_coord, src_coord in zip(new_cube_coords, column_coords): # Check structure of the indexed coord (at one selected point). point_coord = single_point_test_cube.coord(src_coord) if len(point_coord.points) != 1: - msg = ('Coord {} at one x-y position has the shape {}, ' - 'instead of being a single point. ') + msg = ( + "Coord {} at one x-y position has the shape {}, " + "instead of being a single point. " + ) raise ValueError(msg.format(src_coord.name(), src_coord.shape)) # Work out which indices apply to the input coord. @@ -437,12 +459,12 @@ def interpolate(cube, sample_points, method=None): # single point for each coord, but this is very inefficient. # So here, we translate cube indexes into *coord* indexes. src_coord_dims = cube.coord_dims(src_coord) - fancy_coord_index_arrays = [list(all_point_indices[:, src_dim]) - for src_dim in src_coord_dims] + fancy_coord_index_arrays = [ + list(all_point_indices[:, src_dim]) for src_dim in src_coord_dims + ] # Fill the new coord with all the correct points from the old one. - new_cube_coord.points = src_coord.points[ - tuple(fancy_coord_index_arrays)] + new_cube_coord.points = src_coord.points[tuple(fancy_coord_index_arrays)] # NOTE: the new coords do *not* have bounds. return new_cube @@ -457,22 +479,22 @@ def _ll_to_cart(lon, lat): def _cartesian_sample_points(sample_points, sample_point_coord_names): - """ - Replace geographic lat/lon with cartesian xyz. + """Replace geographic lat/lon with cartesian xyz. + Generates coords suitable for nearest point calculations with `scipy.spatial.cKDTree`. - Args: + Parameters + ---------- + sample_points : + [coord][datum] list of sample_positions for each datum, formatted for + fast use of :func:`_ll_to_cart()`. + sample_point_coord_names : + [coord] list of n coord names. - * sample_points[coord][datum]: - list of sample_positions for each datum, formatted for fast use of - :func:`_ll_to_cart()`. - - * sample_point_coord_names[coord]: - list of n coord names - - Returns: - list of [x,y,z,t,etc] positions, formatted for kdtree. + Returns + ------- + list of [x,y,z,t,etc] positions, formatted for kdtree. 
""" # Find lat and lon coord indices @@ -508,8 +530,9 @@ def _cartesian_sample_points(sample_points, sample_point_coord_names): def _nearest_neighbour_indices_ndcoords(cube, sample_points, cache=None): - """ - Returns the indices to select the data value(s) closest to the given + """Calculate the cube nearest neighbour indices for the samples. + + Return the indices to select the data value(s) closest to the given coordinate point values. 'sample_points' is of the form [[coord-or-coord-name, point-value(s)]*]. @@ -521,7 +544,7 @@ def _nearest_neighbour_indices_ndcoords(cube, sample_points, cache=None): Because this function can be slow for multidimensional coordinates, a 'cache' dictionary can be provided by the calling code. - .. Note:: + .. note:: If the points are longitudes/latitudes, these are handled correctly as points on the sphere, but the values must be in 'degrees'. @@ -536,8 +559,10 @@ def _nearest_neighbour_indices_ndcoords(cube, sample_points, cache=None): try: coord, value = sample_points[0] except (KeyError, ValueError): - emsg = ('Sample points must be a list of ' - '(coordinate, value) pairs, got {!r}.') + emsg = ( + "Sample points must be a list of " + "(coordinate, value) pairs, got {!r}." + ) raise TypeError(emsg.format(sample_points)) # Convert names to coords in sample_point and reformat sample point values @@ -549,8 +574,10 @@ def _nearest_neighbour_indices_ndcoords(cube, sample_points, cache=None): for coord, value in sample_points: coord = cube.coord(coord) if id(coord) not in ok_coord_ids: - msg = ('Invalid sample coordinate {!r}: derived coordinates are' - ' not allowed.'.format(coord.name())) + msg = ( + "Invalid sample coordinate {!r}: derived coordinates are" + " not allowed.".format(coord.name()) + ) raise ValueError(msg) sample_point_coords.append(coord) sample_point_coord_names.append(coord.name()) @@ -559,7 +586,7 @@ def _nearest_neighbour_indices_ndcoords(cube, sample_points, cache=None): coord_point_lens = np.array([len(value) for value in coord_values]) if not np.all(coord_point_lens == coord_point_lens[0]): - msg = 'All coordinates must have the same number of sample points.' + msg = "All coordinates must have the same number of sample points." raise ValueError(msg) coord_values = np.array(coord_values) @@ -584,32 +611,30 @@ def _nearest_neighbour_indices_ndcoords(cube, sample_points, cache=None): sample_space_cube.remove_coord(coord) # Order the sample point coords according to the sample space cube coords. 
- sample_space_coord_names = \ - [coord.name() for coord in sample_space_cube.coords()] - new_order = [sample_space_coord_names.index(name) - for name in sample_point_coord_names] + sample_space_coord_names = [coord.name() for coord in sample_space_cube.coords()] + new_order = [ + sample_space_coord_names.index(name) for name in sample_point_coord_names + ] coord_values = np.array([coord_values[i] for i in new_order]) sample_point_coord_names = [sample_point_coord_names[i] for i in new_order] - sample_space_coords = \ - sample_space_cube.dim_coords + sample_space_cube.aux_coords - sample_space_coords_and_dims = \ - [(coord, sample_space_cube.coord_dims(coord)) - for coord in sample_space_coords] + sample_space_coords = sample_space_cube.dim_coords + sample_space_cube.aux_coords + sample_space_coords_and_dims = [ + (coord, sample_space_cube.coord_dims(coord)) for coord in sample_space_coords + ] if cache is not None and cube in cache: kdtree = cache[cube] else: # Create a "sample space position" for each # `datum.sample_space_data_positions[coord_index][datum_index]`. - sample_space_data_positions = \ - np.empty((len(sample_space_coords_and_dims), - sample_space_cube.data.size), - dtype=float) + sample_space_data_positions = np.empty( + (len(sample_space_coords_and_dims), sample_space_cube.data.size), + dtype=float, + ) for d, ndi in enumerate(np.ndindex(sample_space_cube.data.shape)): - for c, (coord, coord_dims) in \ - enumerate(sample_space_coords_and_dims): - # Index of this datum along this coordinate (could be nD). + for c, (coord, coord_dims) in enumerate(sample_space_coords_and_dims): + # Index of this datum along this coordinate (could be n-D). if coord_dims: keys = tuple(ndi[ind] for ind in coord_dims) else: @@ -618,9 +643,9 @@ def _nearest_neighbour_indices_ndcoords(cube, sample_points, cache=None): sample_space_data_positions[c][d] = coord.points[keys] # Convert to cartesian coordinates. Flatten for kdtree compatibility. - cartesian_space_data_coords = \ - _cartesian_sample_points(sample_space_data_positions, - sample_point_coord_names) + cartesian_space_data_coords = _cartesian_sample_points( + sample_space_data_positions, sample_point_coord_names + ) # Create a kdtree for the nearest-distance lookup to these 3d points. kdtree = cKDTree(cartesian_space_data_coords) @@ -635,14 +660,16 @@ def _nearest_neighbour_indices_ndcoords(cube, sample_points, cache=None): # If there is no latlon within the coordinate there will be no change. # Otherwise, geographic latlon is replaced with cartesian xyz. cartesian_sample_points = _cartesian_sample_points( - coord_values, sample_point_coord_names) + coord_values, sample_point_coord_names + ) # Use kdtree to get the nearest sourcepoint index for each target point. _, datum_index_lists = kdtree.query(cartesian_sample_points) # Convert flat indices back into multidimensional sample-space indices. sample_space_dimension_indices = np.unravel_index( - datum_index_lists, sample_space_cube.data.shape) + datum_index_lists, sample_space_cube.data.shape + ) # Convert this from "pointwise list of index arrays for each dimension", # to "list of cube indices for each point". sample_space_ndis = np.array(sample_space_dimension_indices).transpose() @@ -671,8 +698,9 @@ def _nearest_neighbour_indices_ndcoords(cube, sample_points, cache=None): return result -class UnstructuredNearestNeigbourRegridder(object): - """ +class UnstructuredNearestNeigbourRegridder: + """Encapsulate the operation of :meth:`iris.analysis.trajectory.interpolate`. 
+ Encapsulate the operation of :meth:`iris.analysis.trajectory.interpolate` with given source and target grids. @@ -680,40 +708,44 @@ class UnstructuredNearestNeigbourRegridder(object): regridding scheme. """ - # TODO: cache the necessary bits of the operation so re-use can actually + + # TODO: cache the necessary bits of the operation so reuse can actually # be more efficient. def __init__(self, src_cube, target_grid_cube): - """ + """Nearest-neighbour regridder. + A nearest-neighbour regridder to perform regridding from the source grid to the target grid. This can then be applied to any source data with the same structure as the original 'src_cube'. - Args: - - * src_cube: + Parameters + ---------- + src_cube : :class:`~iris.cube.Cube` The :class:`~iris.cube.Cube` defining the source grid. The X and Y coordinates can have any shape, but must be mapped over the same cube dimensions. - - * target_grid_cube: + target_grid_cube : :class:`~iris.cube.Cube` A :class:`~iris.cube.Cube`, whose X and Y coordinates specify a desired target grid. The X and Y coordinates must be one-dimensional dimension coordinates, mapped to different dimensions. All other cube components are ignored. - Returns: - regridder : (object) + Returns + ------- + regridder (object) + A callable object with the interface:: - A callable object with the interface: - `result_cube = regridder(data)` + result_cube = regridder(data) where `data` is a cube with the same grid as the original `src_cube`, that is to be regridded to the `target_grid_cube`. - .. Note:: + Notes + ----- + .. note:: For latitude-longitude coordinates, the nearest-neighbour distances are computed on the sphere, otherwise flat Euclidean distances are @@ -726,6 +758,9 @@ def __init__(self, src_cube, target_grid_cube): have the same units in the source and grid cubes. """ + from iris.analysis._interpolation import snapshot_grid + from iris.util import _meshgrid + # Make a copy of the source cube, so we can convert coordinate units. src_cube = src_cube.copy() @@ -733,15 +768,13 @@ def __init__(self, src_cube, target_grid_cube): tgt_x_coord, tgt_y_coord = snapshot_grid(target_grid_cube) # Check that the source has unique X and Y coords over common dims. - if (not src_cube.coords(axis='x') or not src_cube.coords(axis='y')): - msg = 'Source cube must have X- and Y-axis coordinates.' + if not src_cube.coords(axis="x") or not src_cube.coords(axis="y"): + msg = "Source cube must have X- and Y-axis coordinates." raise ValueError(msg) - src_x_coord = src_cube.coord(axis='x') - src_y_coord = src_cube.coord(axis='y') - if (src_cube.coord_dims(src_x_coord) != - src_cube.coord_dims(src_y_coord)): - msg = ('Source cube X and Y coordinates must have the same ' - 'cube dimensions.') + src_x_coord = src_cube.coord(axis="x") + src_y_coord = src_cube.coord(axis="y") + if src_cube.coord_dims(src_x_coord) != src_cube.coord_dims(src_y_coord): + msg = "Source cube X and Y coordinates must have the same cube dimensions." raise ValueError(msg) # Record *copies* of the original grid coords, in the desired @@ -754,16 +787,22 @@ def __init__(self, src_cube, target_grid_cube): coords_all = [src_x_coord, src_y_coord, tgt_x_coord, tgt_y_coord] cs = coords_all[0].coord_system if not all(coord.coord_system == cs for coord in coords_all): - msg = ('Source and target cube X and Y coordinates must all have ' - 'the same coordinate system.') + msg = ( + "Source and target cube X and Y coordinates must all have " + "the same coordinate system." 
+ ) raise ValueError(msg) # Check *all* X and Y coords are lats+lons, if any are. - latlons = ['latitude' in coord.name() or 'longitude' in coord.name() - for coord in coords_all] + latlons = [ + "latitude" in coord.name() or "longitude" in coord.name() + for coord in coords_all + ] if any(latlons) and not all(latlons): - msg = ('If any X and Y coordinates are latitudes/longitudes, ' - 'then they all must be.') + msg = ( + "If any X and Y coordinates are latitudes/longitudes, " + "then they all must be." + ) raise ValueError(msg) self.grid_is_latlon = any(latlons) @@ -772,18 +811,23 @@ def __init__(self, src_cube, target_grid_cube): # N.B. already copied the target grid, so the result matches that. for coord in coords_all: try: - coord.convert_units('degrees') + coord.convert_units("degrees") except ValueError: - msg = ('Coordinate {!r} has units of {!r}, which does not ' - 'convert to "degrees".') - raise ValueError(msg.format(coord.name(), - str(coord.units))) + msg = ( + "Coordinate {!r} has units of {!r}, which does not " + 'convert to "degrees".' + ) + raise ValueError(msg.format(coord.name(), str(coord.units))) else: # Check that source and target have the same X and Y units. - if (src_x_coord.units != tgt_x_coord.units or - src_y_coord.units != tgt_y_coord.units): - msg = ('Source and target cube X and Y coordinates must ' - 'have the same units.') + if ( + src_x_coord.units != tgt_x_coord.units + or src_y_coord.units != tgt_y_coord.units + ): + msg = ( + "Source and target cube X and Y coordinates must " + "have the same units." + ) raise ValueError(msg) # Record the resulting grid shape. @@ -792,34 +836,42 @@ def __init__(self, src_cube, target_grid_cube): # Calculate sample points as 2d arrays, like broadcast (NY,1)*(1,NX). x_2d, y_2d = _meshgrid(tgt_x_coord.points, tgt_y_coord.points) # Cast as a "trajectory", to suit the method used. - self.trajectory = ((tgt_x_coord.name(), x_2d.flatten()), - (tgt_y_coord.name(), y_2d.flatten())) + self.trajectory = ( + (tgt_x_coord.name(), x_2d.flatten()), + (tgt_y_coord.name(), y_2d.flatten()), + ) def __call__(self, src_cube): # Check the source cube X and Y coords match the original. # Note: for now, this is sufficient to ensure a valid trajectory - # interpolation, but if in future we save + re-use the cache context + # interpolation, but if in future we save and reuse the cache context # for the 'interpolate' call, we may need more checks here. # Check the given cube against the original. - x_cos = src_cube.coords(axis='x') - y_cos = src_cube.coords(axis='y') - if (not x_cos or not y_cos or - y_cos != [self.src_grid_coords[0]] or - x_cos != [self.src_grid_coords[1]]): - msg = ('The given cube is not defined on the same source ' - 'grid as this regridder.') + x_cos = src_cube.coords(axis="x") + y_cos = src_cube.coords(axis="y") + if ( + not x_cos + or not y_cos + or y_cos != [self.src_grid_coords[0]] + or x_cos != [self.src_grid_coords[1]] + ): + msg = ( + "The given cube is not defined on the same source " + "grid as this regridder." + ) raise ValueError(msg) # Convert source XY coordinates to degrees if required. if self.grid_is_latlon: src_cube = src_cube.copy() - src_cube.coord(axis='x').convert_units('degrees') - src_cube.coord(axis='y').convert_units('degrees') + src_cube.coord(axis="x").convert_units("degrees") + src_cube.coord(axis="y").convert_units("degrees") # Get the basic interpolated results. 
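The regridder described above is built once and then reused; a rough, runnable sketch assuming a recent Iris install (all cubes and values below are synthetic and purely illustrative)::

    import numpy as np
    import iris.coords
    import iris.cube
    from iris.analysis.trajectory import UnstructuredNearestNeigbourRegridder

    # "Unstructured" source: X and Y are 1-D AuxCoords over the same dim.
    src_cube = iris.cube.Cube(np.arange(4.0))
    src_cube.add_aux_coord(
        iris.coords.AuxCoord(
            [0.0, 10.0, 20.0, 30.0], standard_name="longitude", units="degrees"
        ),
        0,
    )
    src_cube.add_aux_coord(
        iris.coords.AuxCoord(
            [40.0, 45.0, 50.0, 55.0], standard_name="latitude", units="degrees"
        ),
        0,
    )

    # Target: a small regular grid with 1-D DimCoords on separate dims.
    tgt_lat = iris.coords.DimCoord(
        [42.0, 48.0], standard_name="latitude", units="degrees"
    )
    tgt_lon = iris.coords.DimCoord(
        [5.0, 15.0, 25.0], standard_name="longitude", units="degrees"
    )
    grid_cube = iris.cube.Cube(
        np.zeros((2, 3)), dim_coords_and_dims=[(tgt_lat, 0), (tgt_lon, 1)]
    )

    regridder = UnstructuredNearestNeigbourRegridder(src_cube, grid_cube)
    result = regridder(src_cube)  # reusable for any cube on this source grid
    print(result.shape)  # (2, 3)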
- result_trajectory_cube = interpolate(src_cube, self.trajectory, - method='nearest') + result_trajectory_cube = interpolate( + src_cube, self.trajectory, method="nearest" + ) # Reconstruct this as a cube "like" the source data. # TODO: handle all aux-coords, cell measures ?? diff --git a/lib/iris/aux_factory.py b/lib/iris/aux_factory.py index 8e14245214..41e1e9f573 100644 --- a/lib/iris/aux_factory.py +++ b/lib/iris/aux_factory.py @@ -1,40 +1,25 @@ -# (C) British Crown Copyright 2010 - 2019, Met Office +# Copyright Iris contributors # -# This file is part of Iris. -# -# Iris is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the -# Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Iris is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with Iris. If not, see <http://www.gnu.org/licenses/>. -""" -Definitions of derived coordinates. - -""" - -from __future__ import (absolute_import, division, print_function) -from six.moves import (filter, input, map, range, zip) # noqa -import six +# This file is part of Iris and is released under the BSD license. +# See LICENSE in the root of the repository for full licensing details. +"""Definitions of derived coordinates.""" -from abc import ABCMeta, abstractmethod, abstractproperty +from abc import ABCMeta, abstractmethod import warnings +import cf_units import dask.array as da import numpy as np -from iris._cube_coord_common import CFVariableMixin +from iris._lazy_data import concatenate +from iris.common import CFVariableMixin, CoordMetadata, metadata_manager_factory import iris.coords +from iris.warnings import IrisIgnoringBoundsWarning -class AuxCoordFactory(six.with_metaclass(ABCMeta, CFVariableMixin)): - """ +class AuxCoordFactory(CFVariableMixin, metaclass=ABCMeta): + """Represents a "factory" which can manufacture an additional auxiliary coordinate. + Represents a "factory" which can manufacture an additional auxiliary coordinate on demand, by combining the values of other coordinates. @@ -48,92 +33,119 @@ class AuxCoordFactory(six.with_metaclass(ABCMeta, CFVariableMixin)): """ def __init__(self): + # Configure the metadata manager. + if not hasattr(self, "_metadata_manager"): + self._metadata_manager = metadata_manager_factory(CoordMetadata) + #: Descriptive name of the coordinate made by the factory self.long_name = None #: netCDF variable name for the coordinate made by the factory self.var_name = None - #: Coordinate system (if any) of the coordinate made by the factory self.coord_system = None + # See the climatological property getter. + self._metadata_manager.climatological = False + + @property + def coord_system(self): + """The coordinate-system (if any) of the coordinate made by the factory.""" + return self._metadata_manager.coord_system + + @coord_system.setter + def coord_system(self, value): + self._metadata_manager.coord_system = value + + @property + def climatological(self): + """Return False, as a factory itself can never have points/bounds. + + Always returns False, as a factory itself can never have points/bounds + and therefore can never be climatological by definition.
- @abstractproperty - def dependencies(self): """ - Returns a dictionary mapping from constructor argument names to + return self._metadata_manager.climatological + + @property + @abstractmethod + def dependencies(self): + """Return a dict mapping from constructor argument names to coordinates. + + Return a dictionary mapping from constructor argument names to the corresponding coordinates. """ - def _as_defn(self): - defn = iris.coords.CoordDefn( - self.standard_name, self.long_name, - self.var_name, self.units, - self.attributes, - self.coord_system, - # Slot for Coord 'climatological' property, which this - # doesn't have. - False,) - return defn - @abstractmethod def make_coord(self, coord_dims_func): - """ - Returns a new :class:`iris.coords.AuxCoord` as defined by this - factory. + """Return a new :class:`iris.coords.AuxCoord` as defined by this factory. - Args: - - * coord_dims_func: + Parameters + ---------- + coord_dims_func : A callable which can return the list of dimensions relevant to a given coordinate. + See :meth:`iris.cube.Cube.coord_dims()`. """ - @abstractmethod def update(self, old_coord, new_coord=None): - """ - Notifies the factory of a removal/replacement of a dependency. + """Notify the factory of the removal/replacement of a coordinate. - Args: + Notify the factory of the removal/replacement of a coordinate + which might be a dependency. - * old_coord: - The dependency coordinate to be removed/replaced. - * new_coord: - If None, the dependency using old_coord is removed, otherwise - the dependency is updated to use new_coord. + Parameters + ---------- + old_coord : + The coordinate to be removed/replaced. + new_coord : optional + If None, any dependency using old_coord is removed, otherwise + any dependency using old_coord is updated to use new_coord. """ + new_dependencies = self.dependencies + for name, coord in self.dependencies.items(): + if old_coord is coord: + new_dependencies[name] = new_coord + try: + self._check_dependencies(**new_dependencies) + except ValueError as e: + msg = "Failed to update dependencies. " + str(e) + raise ValueError(msg) + else: + setattr(self, name, new_coord) + break def __repr__(self): def arg_text(item): key, coord = item - return '{}={}'.format(key, str(coord and repr(coord.name()))) + return "{}={}".format(key, str(coord and repr(coord.name()))) + items = sorted(self.dependencies.items(), key=lambda item: item[0]) args = map(arg_text, items) - return '<{}({})>'.format(type(self).__name__, ', '.join(args)) + return "<{}({})>".format(type(self).__name__, ", ".join(args)) def derived_dims(self, coord_dims_func): - """ - Returns the cube dimensions for the derived coordinate. - - Args: + """Return the cube dimensions for the derived coordinate. - * coord_dims_func: - A callable which can return the list of dimensions relevant - to a given coordinate. + Parameters + ---------- + coord_dims_func : + A callable which can return the list of dimensions relevant to a given + coordinate. See :meth:`iris.cube.Cube.coord_dims()`. - Returns: - - A sorted list of cube dimension numbers. + Returns + ------- + A sorted list of cube dimension numbers.
If sigma -> [1] and orog -> [2, 3] then result = [1, 2, 3] derived_dims = set() - for coord in six.itervalues(self.dependencies): + for coord in self.dependencies.values(): if coord: derived_dims.update(coord_dims_func(coord)) @@ -143,47 +155,44 @@ def derived_dims(self, coord_dims_func): return derived_dims def updated(self, new_coord_mapping): - """ - Creates a new instance of this factory where the dependencies - are replaced according to the given mapping. + """Create a new instance of this factory. - Args: + Create a new instance of this factory where the dependencies + are replaced according to the given mapping. - * new_coord_mapping: + Parameters + ---------- + new_coord_mapping : A dictionary mapping from the object IDs potentially used by this factory, to the coordinate objects that should be used instead. """ new_dependencies = {} - for key, coord in six.iteritems(self.dependencies): + for key, coord in self.dependencies.items(): if coord: coord = new_coord_mapping[id(coord)] new_dependencies[key] = coord return type(self)(**new_dependencies) def xml_element(self, doc): - """ - Returns a DOM element describing this coordinate factory. - - """ - element = doc.createElement('coordFactory') - for key, coord in six.iteritems(self.dependencies): + """Return a DOM element describing this coordinate factory.""" + element = doc.createElement("coordFactory") + for key, coord in self.dependencies.items(): element.setAttribute(key, coord._xml_id()) element.appendChild(self.make_coord().xml_element(doc)) return element def _dependency_dims(self, coord_dims_func): dependency_dims = {} - for key, coord in six.iteritems(self.dependencies): + for key, coord in self.dependencies.items(): if coord: dependency_dims[key] = coord_dims_func(coord) return dependency_dims @staticmethod def _nd_bounds(coord, dims, ndim): - """ - Return a lazy bounds array for a dependency coordinate, 'coord'. + """Return a lazy bounds array for a dependency coordinate, 'coord'. The result is aligned to the first 'ndim' cube dimensions, and expanded to the full ('ndim'+1)-dimensional shape. @@ -193,12 +202,14 @@ def _nd_bounds(coord, dims, ndim): The extra final result dimension ('ndim'-th) is the bounds dimension. - Example: + Example:: coord.shape == (70,) coord.nbounds = 2 dims == [3] ndim == 5 - results in: + + results in:: + nd_bounds.shape == (1, 1, 1, 70, 1, 2) """ @@ -218,8 +229,7 @@ def _nd_bounds(coord, dims, ndim): @staticmethod def _nd_points(coord, dims, ndim): - """ - Return a lazy points array for a dependency coordinate, 'coord'. + """Return a lazy points array for a dependency coordinate, 'coord'. The result is aligned to the first 'ndim' cube dimensions, and expanded to the full 'ndim'-dimensional shape. @@ -227,11 +237,15 @@ def _nd_points(coord, dims, ndim): The value of 'ndim' must be >= the highest cube dimension of the dependency coordinate. - Example: + Examples + -------- + :: coord.shape == (4, 3) dims == [3, 2] ndim == 5 - results in: + + results in:: + nd_points.shape == (1, 1, 3, 4, 1) """ @@ -257,8 +271,7 @@ def _nd_points(coord, dims, ndim): return points def _remap(self, dependency_dims, derived_dims): - """ - Return a mapping from dependency names to coordinate points arrays. + """Return a mapping from dependency names to coordinate points arrays. 
For dependencies that are present, the values are all expanded and aligned to the same dimensions, which is the full set of all the @@ -273,7 +286,7 @@ def _remap(self, dependency_dims, derived_dims): ndim = 1 nd_points_by_key = {} - for key, coord in six.iteritems(self.dependencies): + for key, coord in self.dependencies.items(): if coord: # Get the points as consistent with the Cube. nd_points = self._nd_points(coord, dependency_dims[key], ndim) @@ -282,8 +295,9 @@ def _remap(self, dependency_dims, derived_dims): # derived coord. NB. These are always in Cube-order, so # no transpose is needed. if derived_dims: - keys = tuple(slice(None) if dim in derived_dims else 0 for - dim in range(ndim)) + keys = tuple( + slice(None) if dim in derived_dims else 0 for dim in range(ndim) + ) nd_points = nd_points[keys] else: # If no coord, treat value as zero. @@ -295,8 +309,7 @@ def _remap(self, dependency_dims, derived_dims): return nd_points_by_key def _remap_with_bounds(self, dependency_dims, derived_dims): - """ - Return a mapping from dependency names to coordinate bounds arrays. + """Return a mapping from dependency names to coordinate bounds arrays. For dependencies that are present, the values are all expanded and aligned to the same dimensions, which is the full set of all the @@ -315,15 +328,13 @@ def _remap_with_bounds(self, dependency_dims, derived_dims): ndim = 1 nd_values_by_key = {} - for key, coord in six.iteritems(self.dependencies): + for key, coord in self.dependencies.items(): if coord: # Get the bounds or points as consistent with the Cube. if coord.nbounds: - nd_values = self._nd_bounds(coord, dependency_dims[key], - ndim) + nd_values = self._nd_bounds(coord, dependency_dims[key], ndim) else: - nd_values = self._nd_points(coord, dependency_dims[key], - ndim) + nd_values = self._nd_points(coord, dependency_dims[key], ndim) # Restrict to just the dimensions relevant to the # derived coord. NB. These are always in Cube-order, so @@ -354,82 +365,262 @@ def _remap_with_bounds(self, dependency_dims, derived_dims): return nd_values_by_key -class HybridHeightFactory(AuxCoordFactory): - """ - Defines a hybrid-height coordinate factory with the formula: - z = a + b * orog +class AtmosphereSigmaFactory(AuxCoordFactory): + """Define an atmosphere sigma coordinate factory with the following formula. + + .. math:: + p = ptop + sigma * (ps - ptop) """ - def __init__(self, delta=None, sigma=None, orography=None): + + def __init__(self, pressure_at_top=None, sigma=None, surface_air_pressure=None): + """Create an atmosphere sigma coordinate factory with a formula. + + .. math:: + p(n, k, j, i) = pressure_at_top + sigma(k) * + (surface_air_pressure(n, j, i) - pressure_at_top) + + """ + # Configure the metadata manager. + self._metadata_manager = metadata_manager_factory(CoordMetadata) + super().__init__() + + # Check that provided coordinates meet necessary conditions. 
+ self._check_dependencies(pressure_at_top, sigma, surface_air_pressure) + + # Initialize instance attributes + self.units = pressure_at_top.units + self.pressure_at_top = pressure_at_top + self.sigma = sigma + self.surface_air_pressure = surface_air_pressure + self.standard_name = "air_pressure" + self.attributes = {} + + @staticmethod + def _check_dependencies(pressure_at_top, sigma, surface_air_pressure): + """Check for sufficient coordinates.""" + if any( + [ + pressure_at_top is None, + sigma is None, + surface_air_pressure is None, + ] + ): + raise ValueError( + "Unable to construct atmosphere sigma coordinate factory due " + "to insufficient source coordinates" + ) + + # Check dimensions + if pressure_at_top.shape not in ((), (1,)): + raise ValueError( + f"Expected scalar 'pressure_at_top' coordinate, got shape " + f"{pressure_at_top.shape}" + ) + + # Check bounds + if sigma.nbounds not in (0, 2): + raise ValueError( + f"Invalid 'sigma' coordinate: must have either 0 or 2 bounds, " + f"got {sigma.nbounds:d}" + ) + for coord in (pressure_at_top, surface_air_pressure): + if coord.nbounds: + msg = ( + f"Coordinate '{coord.name()}' has bounds. These will " + "be disregarded" + ) + warnings.warn(msg, category=IrisIgnoringBoundsWarning, stacklevel=2) + + # Check units + if sigma.units.is_unknown(): + # Be graceful, and promote unknown to dimensionless units. + sigma.units = cf_units.Unit("1") + if not sigma.units.is_dimensionless(): + raise ValueError( + f"Invalid units: 'sigma' must be dimensionless, got " f"'{sigma.units}'" + ) + if pressure_at_top.units != surface_air_pressure.units: + raise ValueError( + f"Incompatible units: 'pressure_at_top' and " + f"'surface_air_pressure' must have the same units, got " + f"'{pressure_at_top.units}' and " + f"'{surface_air_pressure.units}'" + ) + if not pressure_at_top.units.is_convertible("Pa"): + raise ValueError( + "Invalid units: 'pressure_at_top' and 'surface_air_pressure' " + "must have units of pressure" + ) + + @property + def dependencies(self): + """Return dependencies.""" + dependencies = { + "pressure_at_top": self.pressure_at_top, + "sigma": self.sigma, + "surface_air_pressure": self.surface_air_pressure, + } + return dependencies + + @staticmethod + def _derive(pressure_at_top, sigma, surface_air_pressure): + """Derive coordinate.""" + return pressure_at_top + sigma * (surface_air_pressure - pressure_at_top) + + def make_coord(self, coord_dims_func): + """Return a new :class:`iris.coords.AuxCoord` as defined by this factory. + + Parameters + ---------- + coord_dims_func : + A callable which can return the list of dimensions relevant + to a given coordinate. + + See :meth:`iris.cube.Cube.coord_dims()`. + """ - Creates a hybrid-height coordinate factory with the formula: + # Which dimensions are relevant? 
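A worked numeric check of the `_derive` formula above, with illustrative values::

    import numpy as np

    pressure_at_top = 100.0             # Pa
    sigma = np.array([0.0, 0.5, 1.0])   # dimensionless
    surface_air_pressure = 100000.0     # Pa

    p = pressure_at_top + sigma * (surface_air_pressure - pressure_at_top)
    # p -> [100.0, 50050.0, 100000.0] : exactly pressure_at_top at
    # sigma=0, exactly the surface pressure at sigma=1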
+ derived_dims = self.derived_dims(coord_dims_func) + dependency_dims = self._dependency_dims(coord_dims_func) + + # Build the points array + nd_points_by_key = self._remap(dependency_dims, derived_dims) + points = self._derive( + nd_points_by_key["pressure_at_top"], + nd_points_by_key["sigma"], + nd_points_by_key["surface_air_pressure"], + ) + + # Bounds + bounds = None + if self.sigma.nbounds: + nd_values_by_key = self._remap_with_bounds(dependency_dims, derived_dims) + pressure_at_top = nd_values_by_key["pressure_at_top"] + sigma = nd_values_by_key["sigma"] + surface_air_pressure = nd_values_by_key["surface_air_pressure"] + ok_bound_shapes = [(), (1,), (2,)] + if sigma.shape[-1:] not in ok_bound_shapes: + raise ValueError("Invalid sigma coordinate bounds") + if pressure_at_top.shape[-1:] not in [(), (1,)]: + warnings.warn( + "Pressure at top coordinate has bounds. These are being " + "disregarded", + category=IrisIgnoringBoundsWarning, + ) + pressure_at_top_pts = nd_points_by_key["pressure_at_top"] + bds_shape = list(pressure_at_top_pts.shape) + [1] + pressure_at_top = pressure_at_top_pts.reshape(bds_shape) + if surface_air_pressure.shape[-1:] not in [(), (1,)]: + warnings.warn( + "Surface pressure coordinate has bounds. These are being " + "disregarded", + category=IrisIgnoringBoundsWarning, + ) + surface_air_pressure_pts = nd_points_by_key["surface_air_pressure"] + bds_shape = list(surface_air_pressure_pts.shape) + [1] + surface_air_pressure = surface_air_pressure_pts.reshape(bds_shape) + bounds = self._derive(pressure_at_top, sigma, surface_air_pressure) + + # Create coordinate + return iris.coords.AuxCoord( + points, + standard_name=self.standard_name, + long_name=self.long_name, + var_name=self.var_name, + units=self.units, + bounds=bounds, + attributes=self.attributes, + coord_system=self.coord_system, + ) + + +class HybridHeightFactory(AuxCoordFactory): + """Defines a hybrid-height coordinate factory.""" + + def __init__(self, delta=None, sigma=None, orography=None): + """Create a hybrid-height coordinate factory with the following formula. + + .. math:: z = a + b * orog At least one of `delta` or `orography` must be provided. - Args: - - * delta: Coord + Parameters + ---------- + delta : Coord, optional The coordinate providing the `a` term. - * sigma: Coord + sigma : Coord, optional The coordinate providing the `b` term. - * orography: Coord + orography : Coord, optional The coordinate providing the `orog` term. """ - super(HybridHeightFactory, self).__init__() + # Configure the metadata manager. + self._metadata_manager = metadata_manager_factory(CoordMetadata) + super().__init__() if delta and delta.nbounds not in (0, 2): - raise ValueError('Invalid delta coordinate: must have either 0 or' - ' 2 bounds.') + raise ValueError( + "Invalid delta coordinate: must have either 0 or 2 bounds." + ) if sigma and sigma.nbounds not in (0, 2): - raise ValueError('Invalid sigma coordinate: must have either 0 or' - ' 2 bounds.') + raise ValueError( + "Invalid sigma coordinate: must have either 0 or 2 bounds." + ) if orography and orography.nbounds: - msg = 'Orography coordinate {!r} has bounds.' \ - ' These will be disregarded.'.format(orography.name()) - warnings.warn(msg, UserWarning, stacklevel=2) + msg = ( + "Orography coordinate {!r} has bounds." 
+ " These will be disregarded.".format(orography.name()) + ) + warnings.warn(msg, category=IrisIgnoringBoundsWarning, stacklevel=2) self.delta = delta self.sigma = sigma self.orography = orography - self.standard_name = 'altitude' + self.standard_name = "altitude" if delta is None and orography is None: - raise ValueError('Unable to determine units: no delta or orography' - ' available.') + emsg = "Unable to determine units: no delta or orography available." + raise ValueError(emsg) if delta and orography and delta.units != orography.units: - raise ValueError('Incompatible units: delta and orography must' - ' have the same units.') + emsg = "Incompatible units: delta and orography must have the same units." + raise ValueError(emsg) self.units = (delta and delta.units) or orography.units - if not self.units.is_convertible('m'): - raise ValueError('Invalid units: delta and/or orography' - ' must be expressed in length units.') - self.attributes = {'positive': 'up'} + if not self.units.is_convertible("m"): + emsg = ( + "Invalid units: delta and/or orography must be expressed " + "in length units." + ) + raise ValueError(emsg) + self.attributes = {"positive": "up"} @property def dependencies(self): - """ - Returns a dictionary mapping from constructor argument names to + """Return a dict mapping from constructor arg names to coordinates. + + Return a dictionary mapping from constructor argument names to the corresponding coordinates. """ - return {'delta': self.delta, 'sigma': self.sigma, - 'orography': self.orography} + return { + "delta": self.delta, + "sigma": self.sigma, + "orography": self.orography, + } def _derive(self, delta, sigma, orography): return delta + sigma * orography def make_coord(self, coord_dims_func): - """ - Returns a new :class:`iris.coords.AuxCoord` as defined by this - factory. - - Args: + """Return a new :class:`iris.coords.AuxCoord` as defined by this factory. - * coord_dims_func: + Parameters + ---------- + coord_dims_func : A callable which can return the list of dimensions relevant to a given coordinate. + See :meth:`iris.cube.Cube.coord_dims()`. """ @@ -439,148 +630,163 @@ def make_coord(self, coord_dims_func): # Build the points array. nd_points_by_key = self._remap(dependency_dims, derived_dims) - points = self._derive(nd_points_by_key['delta'], - nd_points_by_key['sigma'], - nd_points_by_key['orography']) + points = self._derive( + nd_points_by_key["delta"], + nd_points_by_key["sigma"], + nd_points_by_key["orography"], + ) bounds = None - if ((self.delta and self.delta.nbounds) or - (self.sigma and self.sigma.nbounds)): + if (self.delta and self.delta.nbounds) or (self.sigma and self.sigma.nbounds): # Build the bounds array. - nd_values_by_key = self._remap_with_bounds(dependency_dims, - derived_dims) - delta = nd_values_by_key['delta'] - sigma = nd_values_by_key['sigma'] - orography = nd_values_by_key['orography'] + nd_values_by_key = self._remap_with_bounds(dependency_dims, derived_dims) + delta = nd_values_by_key["delta"] + sigma = nd_values_by_key["sigma"] + orography = nd_values_by_key["orography"] ok_bound_shapes = [(), (1,), (2,)] if delta.shape[-1:] not in ok_bound_shapes: - raise ValueError('Invalid delta coordinate bounds.') + raise ValueError("Invalid delta coordinate bounds.") if sigma.shape[-1:] not in ok_bound_shapes: - raise ValueError('Invalid sigma coordinate bounds.') + raise ValueError("Invalid sigma coordinate bounds.") if orography.shape[-1:] not in [(), (1,)]: - warnings.warn('Orography coordinate has bounds. 
' - 'These are being disregarded.', - UserWarning, stacklevel=2) - orography_pts = nd_points_by_key['orography'] + warnings.warn( + "Orography coordinate has bounds. These are being disregarded.", + category=IrisIgnoringBoundsWarning, + stacklevel=2, + ) + orography_pts = nd_points_by_key["orography"] bds_shape = list(orography_pts.shape) + [1] orography = orography_pts.reshape(bds_shape) bounds = self._derive(delta, sigma, orography) - hybrid_height = iris.coords.AuxCoord(points, - standard_name=self.standard_name, - long_name=self.long_name, - var_name=self.var_name, - units=self.units, - bounds=bounds, - attributes=self.attributes, - coord_system=self.coord_system) + hybrid_height = iris.coords.AuxCoord( + points, + standard_name=self.standard_name, + long_name=self.long_name, + var_name=self.var_name, + units=self.units, + bounds=bounds, + attributes=self.attributes, + coord_system=self.coord_system, + ) return hybrid_height def update(self, old_coord, new_coord=None): - """ - Notifies the factory of the removal/replacement of a coordinate - which might be a dependency. + """Notify the factory of the removal/replacement of a coordinate. - Args: + Notify the factory of the removal/replacement of a coordinate + which might be a dependency. - * old_coord: + Parameters + ---------- + old_coord : The coordinate to be removed/replaced. - * new_coord: + new_coord : optional If None, any dependency using old_coord is removed, otherwise any dependency using old_coord is updated to use new_coord. """ if self.delta is old_coord: if new_coord and new_coord.nbounds not in (0, 2): - raise ValueError('Invalid delta coordinate:' - ' must have either 0 or 2 bounds.') + raise ValueError( + "Invalid delta coordinate: must have either 0 or 2 bounds." + ) self.delta = new_coord elif self.sigma is old_coord: if new_coord and new_coord.nbounds not in (0, 2): - raise ValueError('Invalid sigma coordinate:' - ' must have either 0 or 2 bounds.') + raise ValueError( + "Invalid sigma coordinate: must have either 0 or 2 bounds." + ) self.sigma = new_coord elif self.orography is old_coord: if new_coord and new_coord.nbounds: - msg = 'Orography coordinate {!r} has bounds.' \ - ' These will be disregarded.'.format(new_coord.name()) - warnings.warn(msg, UserWarning, stacklevel=2) + msg = ( + "Orography coordinate {!r} has bounds." + " These will be disregarded.".format(new_coord.name()) + ) + warnings.warn(msg, category=IrisIgnoringBoundsWarning, stacklevel=2) self.orography = new_coord class HybridPressureFactory(AuxCoordFactory): - """ - Defines a hybrid-pressure coordinate factory with the formula: - p = ap + b * ps + """Define a hybrid-pressure coordinate factory.""" - """ def __init__(self, delta=None, sigma=None, surface_air_pressure=None): - """ - Creates a hybrid-height coordinate factory with the formula: + """Create a hybrid-pressure coordinate factory with the following formula. + + .. math:: p = ap + b * ps At least one of `delta` or `surface_air_pressure` must be provided. - Args: - - * delta: Coord + Parameters + ---------- + delta : Coord, optional The coordinate providing the `ap` term. - * sigma: Coord + sigma : Coord, optional The coordinate providing the `b` term. - * surface_air_pressure: Coord + surface_air_pressure : Coord, optional The coordinate providing the `ps` term. """ - super(HybridPressureFactory, self).__init__() + # Configure the metadata manager. + self._metadata_manager = metadata_manager_factory(CoordMetadata) + super().__init__() # Check that provided coords meet necessary conditions.
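Similarly, a worked numeric check of the hybrid-pressure formula p = ap + b * ps from the docstring above, with illustrative values::

    import numpy as np

    delta = np.array([0.0, 5000.0, 20000.0])  # 'ap' term, Pa, per level
    sigma = np.array([1.0, 0.8, 0.2])         # 'b' term, dimensionless
    surface_air_pressure = 100000.0           # 'ps' term, Pa

    p = delta + sigma * surface_air_pressure
    # p -> [100000.0, 85000.0, 40000.0] : the full surface pressure at the
    # lowest level, mostly the fixed 'ap' part higher up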
self._check_dependencies(delta, sigma, surface_air_pressure) + self.units = (delta and delta.units) or surface_air_pressure.units self.delta = delta self.sigma = sigma self.surface_air_pressure = surface_air_pressure - self.standard_name = 'air_pressure' + self.standard_name = "air_pressure" self.attributes = {} - @property - def units(self): - if self.delta is not None: - units = self.delta.units - else: - units = self.surface_air_pressure.units - return units - @staticmethod - def _check_dependencies(delta, sigma, - surface_air_pressure): + def _check_dependencies(delta, sigma, surface_air_pressure): # Check for sufficient coordinates. - if (delta is None and (sigma is None or - surface_air_pressure is None)): - msg = 'Unable to contruct hybrid pressure coordinate factory ' \ - 'due to insufficient source coordinates.' + if delta is None and (sigma is None or surface_air_pressure is None): + msg = ( + "Unable to construct hybrid pressure coordinate factory " + "due to insufficient source coordinates." + ) raise ValueError(msg) # Check bounds. if delta and delta.nbounds not in (0, 2): - raise ValueError('Invalid delta coordinate: must have either 0 or' - ' 2 bounds.') + raise ValueError( + "Invalid delta coordinate: must have either 0 or 2 bounds." + ) if sigma and sigma.nbounds not in (0, 2): - raise ValueError('Invalid sigma coordinate: must have either 0 or' - ' 2 bounds.') + raise ValueError( + "Invalid sigma coordinate: must have either 0 or 2 bounds." + ) if surface_air_pressure and surface_air_pressure.nbounds: - msg = 'Surface pressure coordinate {!r} has bounds. These will' \ - ' be disregarded.'.format(surface_air_pressure.name()) - warnings.warn(msg, UserWarning, stacklevel=2) + msg = ( + "Surface pressure coordinate {!r} has bounds. These will" + " be disregarded.".format(surface_air_pressure.name()) + ) + warnings.warn(msg, category=IrisIgnoringBoundsWarning, stacklevel=2) # Check units. + if sigma is not None and sigma.units.is_unknown(): + # Be graceful, and promote unknown to dimensionless units. + sigma.units = cf_units.Unit("1") + if sigma is not None and not sigma.units.is_dimensionless(): - raise ValueError('Invalid units: sigma must be dimensionless.') - if delta is not None and surface_air_pressure is not None and \ - delta.units != surface_air_pressure.units: - msg = 'Incompatible units: delta and ' \ - 'surface_air_pressure must have the same units.' + raise ValueError("Invalid units: sigma must be dimensionless.") + if ( + delta is not None + and surface_air_pressure is not None + and delta.units != surface_air_pressure.units + ): + msg = ( + "Incompatible units: delta and " + "surface_air_pressure must have the same units." + ) raise ValueError(msg) if delta is not None: @@ -588,34 +794,39 @@ def _check_dependencies(delta, sigma, else: units = surface_air_pressure.units - if not units.is_convertible('Pa'): - msg = 'Invalid units: delta and ' \ - 'surface_air_pressure must have units of pressure.' + if not units.is_convertible("Pa"): + msg = ( + "Invalid units: delta and " + "surface_air_pressure must have units of pressure." + ) raise ValueError(msg) @property def dependencies(self): - """ + """Return a dict mapping from constructor arg names to coordinates. + Returns a dictionary mapping from constructor argument names to the corresponding coordinates. 
""" - return {'delta': self.delta, 'sigma': self.sigma, - 'surface_air_pressure': self.surface_air_pressure} + return { + "delta": self.delta, + "sigma": self.sigma, + "surface_air_pressure": self.surface_air_pressure, + } def _derive(self, delta, sigma, surface_air_pressure): return delta + sigma * surface_air_pressure def make_coord(self, coord_dims_func): - """ - Returns a new :class:`iris.coords.AuxCoord` as defined by this - factory. - - Args: + """Return a new :class:`iris.coords.AuxCoord` as defined by this factory. - * coord_dims_func: + Parameters + ---------- + coord_dims_func : A callable which can return the list of dimensions relevant to a given coordinate. + See :meth:`iris.cube.Cube.coord_dims()`. """ @@ -625,92 +836,85 @@ def make_coord(self, coord_dims_func): # Build the points array. nd_points_by_key = self._remap(dependency_dims, derived_dims) - points = self._derive(nd_points_by_key['delta'], - nd_points_by_key['sigma'], - nd_points_by_key['surface_air_pressure']) + points = self._derive( + nd_points_by_key["delta"], + nd_points_by_key["sigma"], + nd_points_by_key["surface_air_pressure"], + ) bounds = None - if ((self.delta and self.delta.nbounds) or - (self.sigma and self.sigma.nbounds)): + if (self.delta and self.delta.nbounds) or (self.sigma and self.sigma.nbounds): # Build the bounds array. - nd_values_by_key = self._remap_with_bounds(dependency_dims, - derived_dims) - delta = nd_values_by_key['delta'] - sigma = nd_values_by_key['sigma'] - surface_air_pressure = nd_values_by_key['surface_air_pressure'] + nd_values_by_key = self._remap_with_bounds(dependency_dims, derived_dims) + delta = nd_values_by_key["delta"] + sigma = nd_values_by_key["sigma"] + surface_air_pressure = nd_values_by_key["surface_air_pressure"] ok_bound_shapes = [(), (1,), (2,)] if delta.shape[-1:] not in ok_bound_shapes: - raise ValueError('Invalid delta coordinate bounds.') + raise ValueError("Invalid delta coordinate bounds.") if sigma.shape[-1:] not in ok_bound_shapes: - raise ValueError('Invalid sigma coordinate bounds.') + raise ValueError("Invalid sigma coordinate bounds.") if surface_air_pressure.shape[-1:] not in [(), (1,)]: - warnings.warn('Surface pressure coordinate has bounds. ' - 'These are being disregarded.') - surface_air_pressure_pts = nd_points_by_key[ - 'surface_air_pressure'] + warnings.warn( + "Surface pressure coordinate has bounds. " + "These are being disregarded.", + category=IrisIgnoringBoundsWarning, + ) + surface_air_pressure_pts = nd_points_by_key["surface_air_pressure"] bds_shape = list(surface_air_pressure_pts.shape) + [1] - surface_air_pressure = surface_air_pressure_pts.reshape( - bds_shape) + surface_air_pressure = surface_air_pressure_pts.reshape(bds_shape) bounds = self._derive(delta, sigma, surface_air_pressure) hybrid_pressure = iris.coords.AuxCoord( - points, standard_name=self.standard_name, long_name=self.long_name, - var_name=self.var_name, units=self.units, bounds=bounds, - attributes=self.attributes, coord_system=self.coord_system) + points, + standard_name=self.standard_name, + long_name=self.long_name, + var_name=self.var_name, + units=self.units, + bounds=bounds, + attributes=self.attributes, + coord_system=self.coord_system, + ) return hybrid_pressure - def update(self, old_coord, new_coord=None): - """ - Notifies the factory of the removal/replacement of a coordinate - which might be a dependency. - - Args: - - * old_coord: - The coordinate to be removed/replaced. 
- * new_coord: - If None, any dependency using old_coord is removed, otherwise - any dependency using old_coord is updated to use new_coord. - - """ - new_dependencies = self.dependencies - for name, coord in self.dependencies.items(): - if old_coord is coord: - new_dependencies[name] = new_coord - try: - self._check_dependencies(**new_dependencies) - except ValueError as e: - msg = 'Failed to update dependencies. ' + str(e) - raise ValueError(msg) - else: - setattr(self, name, new_coord) - break - class OceanSigmaZFactory(AuxCoordFactory): """Defines an ocean sigma over z coordinate factory.""" - def __init__(self, sigma=None, eta=None, depth=None, - depth_c=None, nsigma=None, zlev=None): - """ - Creates a ocean sigma over z coordinate factory with the formula: + def __init__( + self, + sigma=None, + eta=None, + depth=None, + depth_c=None, + nsigma=None, + zlev=None, + ): + """Create an ocean sigma over z coordinate factory with the following formula. if k < nsigma: + + .. math:: z(n, k, j, i) = eta(n, j, i) + sigma(k) * - (min(depth_c, depth(j, i)) + eta(n, j, i)) + (min(depth_c, depth(j, i)) + eta(n, j, i)) if k >= nsigma: + + .. math:: z(n, k, j, i) = zlev(k) The `zlev` and 'nsigma' coordinates must be provided, and at least either `eta`, or 'sigma' and `depth` and `depth_c` coordinates. """ - super(OceanSigmaZFactory, self).__init__() + # Configure the metadata manager. + self._metadata_manager = metadata_manager_factory(CoordMetadata) + super().__init__() # Check that provided coordinates meet necessary conditions. self._check_dependencies(sigma, eta, depth, depth_c, nsigma, zlev) + self.units = zlev.units self.sigma = sigma self.eta = eta @@ -719,88 +923,110 @@ def __init__(self, sigma=None, eta=None, depth=None, self.nsigma = nsigma self.zlev = zlev - self.standard_name = 'sea_surface_height_above_reference_ellipsoid' - self.attributes = {'positive': 'up'} - - @property - def units(self): - return self.zlev.units + self.standard_name = "sea_surface_height_above_reference_ellipsoid" + self.attributes = {"positive": "up"} @staticmethod def _check_dependencies(sigma, eta, depth, depth_c, nsigma, zlev): # Check for sufficient factory coordinates. if zlev is None: - raise ValueError('Unable to determine units: ' - 'no zlev coordinate available.') + raise ValueError("Unable to determine units: no zlev coordinate available.") if nsigma is None: - raise ValueError('Missing nsigma coordinate.') + raise ValueError("Missing nsigma coordinate.") - if eta is None and (sigma is None or depth_c is None or - depth is None): - msg = 'Unable to construct ocean sigma over z coordinate ' \ - 'factory due to insufficient source coordinates.' + if eta is None and (sigma is None or depth_c is None or depth is None): + msg = ( + "Unable to construct ocean sigma over z coordinate " + "factory due to insufficient source coordinates." + ) raise ValueError(msg) # Check bounds and shape. 
- for coord, term in ((sigma, 'sigma'), (zlev, 'zlev')): + for coord, term in ((sigma, "sigma"), (zlev, "zlev")): if coord is not None and coord.nbounds not in (0, 2): - msg = 'Invalid {} coordinate {!r}: must have either ' \ - '0 or 2 bounds.'.format(term, coord.name()) + msg = ( + "Invalid {} coordinate {!r}: must have either " + "0 or 2 bounds.".format(term, coord.name()) + ) raise ValueError(msg) if sigma and sigma.nbounds != zlev.nbounds: - msg = 'The sigma coordinate {!r} and zlev coordinate {!r} ' \ - 'must be equally bounded.'.format(sigma.name(), zlev.name()) + msg = ( + "The sigma coordinate {!r} and zlev coordinate {!r} " + "must be equally bounded.".format(sigma.name(), zlev.name()) + ) raise ValueError(msg) - coords = ((eta, 'eta'), (depth, 'depth'), - (depth_c, 'depth_c'), (nsigma, 'nsigma')) + coords = ( + (eta, "eta"), + (depth, "depth"), + (depth_c, "depth_c"), + (nsigma, "nsigma"), + ) for coord, term in coords: if coord is not None and coord.nbounds: - msg = 'The {} coordinate {!r} has bounds. ' \ - 'These are being disregarded.'.format(term, coord.name()) - warnings.warn(msg, UserWarning, stacklevel=2) + msg = ( + "The {} coordinate {!r} has bounds. " + "These are being disregarded.".format(term, coord.name()) + ) + warnings.warn(msg, category=IrisIgnoringBoundsWarning, stacklevel=2) - for coord, term in ((depth_c, 'depth_c'), (nsigma, 'nsigma')): + for coord, term in ((depth_c, "depth_c"), (nsigma, "nsigma")): if coord is not None and coord.shape != (1,): - msg = 'Expected scalar {} coordinate {!r}: ' \ - 'got shape {!r}.'.format(term, coord.name(), coord.shape) + msg = "Expected scalar {} coordinate {!r}: got shape {!r}.".format( + term, coord.name(), coord.shape + ) raise ValueError(msg) # Check units. - if not zlev.units.is_convertible('m'): - msg = 'Invalid units: zlev coordinate {!r} ' \ - 'must have units of distance.'.format(zlev.name()) + if not zlev.units.is_convertible("m"): + msg = ( + "Invalid units: zlev coordinate {!r} " + "must have units of distance.".format(zlev.name()) + ) raise ValueError(msg) + if sigma is not None and sigma.units.is_unknown(): + # Be graceful, and promote unknown to dimensionless units. + sigma.units = cf_units.Unit("1") + if sigma is not None and not sigma.units.is_dimensionless(): - msg = 'Invalid units: sigma coordinate {!r} ' \ - 'must be dimensionless.'.format(sigma.name()) + msg = "Invalid units: sigma coordinate {!r} must be dimensionless.".format( + sigma.name() + ) raise ValueError(msg) - coords = ((eta, 'eta'), (depth_c, 'depth_c'), (depth, 'depth')) + coords = ((eta, "eta"), (depth_c, "depth_c"), (depth, "depth")) for coord, term in coords: if coord is not None and coord.units != zlev.units: - msg = 'Incompatible units: {} coordinate {!r} and zlev ' \ - 'coordinate {!r} must have ' \ - 'the same units.'.format(term, coord.name(), zlev.name()) + msg = ( + "Incompatible units: {} coordinate {!r} and zlev " + "coordinate {!r} must have " + "the same units.".format(term, coord.name(), zlev.name()) + ) raise ValueError(msg) @property def dependencies(self): - """ + """Return a dict mapping from constructor arg names to coordinates. + Returns a dictionary mapping from constructor argument names to the corresponding coordinates. 
""" - return dict(sigma=self.sigma, eta=self.eta, depth=self.depth, - depth_c=self.depth_c, nsigma=self.nsigma, zlev=self.zlev) + return dict( + sigma=self.sigma, + eta=self.eta, + depth=self.depth, + depth_c=self.depth_c, + nsigma=self.nsigma, + zlev=self.zlev, + ) - def _derive(self, sigma, eta, depth, depth_c, - zlev, nsigma, coord_dims_func): + def _derive(self, sigma, eta, depth, depth_c, zlev, nsigma, coord_dims_func): # Calculate the index of the 'z' dimension in the input arrays. # First find the cube 'z' dimension ... - [cube_z_dim] = coord_dims_func(self.dependencies['zlev']) + [cube_z_dim] = coord_dims_func(self.dependencies["zlev"]) # ... then calculate the corresponding dependency dimension. derived_cubedims = self.derived_dims(coord_dims_func) z_dim = derived_cubedims.index(cube_z_dim) @@ -809,9 +1035,8 @@ def _derive(self, sigma, eta, depth, depth_c, # Note: all the inputs have the same number of dimensions >= 1, except # for any missing dependencies, which have scalar values. allshapes = np.array( - [el.shape - for el in (sigma, eta, depth, depth_c, zlev) - if el.ndim > 0]) + [el.shape for el in (sigma, eta, depth, depth_c, zlev) if el.ndim > 0] + ) result_shape = list(np.max(allshapes, axis=0)) ndims = len(result_shape) @@ -842,30 +1067,26 @@ def _derive(self, sigma, eta, depth, depth_c, if len(result_shape) > 1: result_chunks = [1] * len(result_shape) result_chunks[-2:] = result_shape[-2:] - ones_full_result = da.ones(result_shape, chunks=result_chunks, - dtype=zlev.dtype) + ones_full_result = da.ones(result_shape, chunks=result_chunks, dtype=zlev.dtype) # Expand nsigma_levs to its full required shape : needed as the # calculated result may have a fixed size of 1 in some dimensions. result_nsigma_levs = nsigma_levs * ones_full_result[z_slices_nsigma] # Likewise, expand zlev to its full required shape. - result_rest_levs = (zlev[z_slices_rest] * - ones_full_result[z_slices_rest]) + result_rest_levs = zlev[z_slices_rest] * ones_full_result[z_slices_rest] # Combine nsigma and 'rest' levels for the final result. - result = da.concatenate([result_nsigma_levs, result_rest_levs], - axis=z_dim) + result = concatenate([result_nsigma_levs, result_rest_levs], axis=z_dim) return result def make_coord(self, coord_dims_func): - """ - Returns a new :class:`iris.coords.AuxCoord` as defined by this factory. + """Return a new :class:`iris.coords.AuxCoord` as defined by this factory. - Args: - - * coord_dims_func: - A callable which can return the list of dimesions relevant + Parameters + ---------- + coord_dims_func : + A callable which can return the list of dimensions relevant to a given coordinate. See :meth:`iris.cube.Cube.coord_dims()`. """ @@ -876,151 +1097,142 @@ def make_coord(self, coord_dims_func): # Build the points array. nd_points_by_key = self._remap(dependency_dims, derived_dims) - [nsigma] = nd_points_by_key['nsigma'] - points = self._derive(nd_points_by_key['sigma'], - nd_points_by_key['eta'], - nd_points_by_key['depth'], - nd_points_by_key['depth_c'], - nd_points_by_key['zlev'], - nsigma, - coord_dims_func) + [nsigma] = nd_points_by_key["nsigma"] + points = self._derive( + nd_points_by_key["sigma"], + nd_points_by_key["eta"], + nd_points_by_key["depth"], + nd_points_by_key["depth_c"], + nd_points_by_key["zlev"], + nsigma, + coord_dims_func, + ) bounds = None if self.zlev.nbounds or (self.sigma and self.sigma.nbounds): # Build the bounds array. 
- nd_values_by_key = self._remap_with_bounds(dependency_dims, - derived_dims) + nd_values_by_key = self._remap_with_bounds(dependency_dims, derived_dims) valid_shapes = [(), (1,), (2,)] - for key in ('sigma', 'zlev'): + for key in ("sigma", "zlev"): if nd_values_by_key[key].shape[-1:] not in valid_shapes: name = self.dependencies[key].name() - msg = 'Invalid bounds for {} ' \ - 'coordinate {!r}.'.format(key, name) + msg = "Invalid bounds for {} coordinate {!r}.".format(key, name) raise ValueError(msg) valid_shapes.pop() - for key in ('eta', 'depth', 'depth_c', 'nsigma'): + for key in ("eta", "depth", "depth_c", "nsigma"): if nd_values_by_key[key].shape[-1:] not in valid_shapes: name = self.dependencies[key].name() - msg = 'The {} coordinate {!r} has bounds. ' \ - 'These are being disregarded.'.format(key, name) - warnings.warn(msg, UserWarning, stacklevel=2) + msg = ( + "The {} coordinate {!r} has bounds. " + "These are being disregarded.".format(key, name) + ) + warnings.warn(msg, category=IrisIgnoringBoundsWarning, stacklevel=2) # Swap bounds with points. bds_shape = list(nd_points_by_key[key].shape) + [1] bounds = nd_points_by_key[key].reshape(bds_shape) nd_values_by_key[key] = bounds - bounds = self._derive(nd_values_by_key['sigma'], - nd_values_by_key['eta'], - nd_values_by_key['depth'], - nd_values_by_key['depth_c'], - nd_values_by_key['zlev'], - nsigma, - coord_dims_func) - - coord = iris.coords.AuxCoord(points, - standard_name=self.standard_name, - long_name=self.long_name, - var_name=self.var_name, - units=self.units, - bounds=bounds, - attributes=self.attributes, - coord_system=self.coord_system) + bounds = self._derive( + nd_values_by_key["sigma"], + nd_values_by_key["eta"], + nd_values_by_key["depth"], + nd_values_by_key["depth_c"], + nd_values_by_key["zlev"], + nsigma, + coord_dims_func, + ) + + coord = iris.coords.AuxCoord( + points, + standard_name=self.standard_name, + long_name=self.long_name, + var_name=self.var_name, + units=self.units, + bounds=bounds, + attributes=self.attributes, + coord_system=self.coord_system, + ) return coord - def update(self, old_coord, new_coord=None): - """ - Notifies the factory of the removal/replacement of a coordinate - which might be a dependency. - - Args: - - * old_coord: - The coordinate to be removed/replaced. - * new_coord: - If None, any dependency using old_coord is removed, otherwise - any dependency using old_coord is updated to use new_coord. - - """ - new_dependencies = self.dependencies - for name, coord in self.dependencies.items(): - if old_coord is coord: - new_dependencies[name] = new_coord - try: - self._check_dependencies(**new_dependencies) - except ValueError as e: - msg = 'Failed to update dependencies. ' + str(e) - raise ValueError(msg) - else: - setattr(self, name, new_coord) - break - class OceanSigmaFactory(AuxCoordFactory): """Defines an ocean sigma coordinate factory.""" def __init__(self, sigma=None, eta=None, depth=None): - """ - Creates an ocean sigma coordinate factory with the formula: + """Create an ocean sigma coordinate factory with the following formula. - z(n, k, j, i) = eta(n, j, i) + sigma(k) * - (depth(j, i) + eta(n, j, i)) + .. math:: + z(n, k, j, i) = eta(n, j, i) + sigma(k) * + (depth(j, i) + eta(n, j, i)) """ - super(OceanSigmaFactory, self).__init__() + # Configure the metadata manager. + self._metadata_manager = metadata_manager_factory(CoordMetadata) + super().__init__() # Check that provided coordinates meet necessary conditions. 
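+        # A quick numeric illustration of the formula above (example values
+        # only; eta and depth share units, sigma is dimensionless):
+        #
+        #     import numpy as np
+        #     sigma = np.array([0.0, -0.5, -1.0])
+        #     eta, depth = 1.5, 100.0
+        #     z = eta + sigma * (depth + eta)   # -> [1.5, -49.25, -100.0]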
self._check_dependencies(sigma, eta, depth) + self.units = depth.units self.sigma = sigma self.eta = eta self.depth = depth - self.standard_name = 'sea_surface_height_above_reference_ellipsoid' - self.attributes = {'positive': 'up'} - - @property - def units(self): - return self.depth.units + self.standard_name = "sea_surface_height_above_reference_ellipsoid" + self.attributes = {"positive": "up"} @staticmethod def _check_dependencies(sigma, eta, depth): # Check for sufficient factory coordinates. if eta is None or sigma is None or depth is None: - msg = 'Unable to construct ocean sigma coordinate ' \ - 'factory due to insufficient source coordinates.' + msg = ( + "Unable to construct ocean sigma coordinate " + "factory due to insufficient source coordinates." + ) raise ValueError(msg) # Check bounds and shape. - coord, term = (sigma, 'sigma') + coord, term = (sigma, "sigma") if coord is not None and coord.nbounds not in (0, 2): - msg = 'Invalid {} coordinate {!r}: must have either ' \ - '0 or 2 bounds.'.format(term, coord.name()) + msg = "Invalid {} coordinate {!r}: must have either 0 or 2 bounds.".format( + term, coord.name() + ) raise ValueError(msg) - coords = ((eta, 'eta'), (depth, 'depth')) + coords = ((eta, "eta"), (depth, "depth")) for coord, term in coords: if coord is not None and coord.nbounds: - msg = 'The {} coordinate {!r} has bounds. ' \ - 'These are being disregarded.'.format(term, coord.name()) - warnings.warn(msg, UserWarning, stacklevel=2) + msg = ( + "The {} coordinate {!r} has bounds. " + "These are being disregarded.".format(term, coord.name()) + ) + warnings.warn(msg, category=IrisIgnoringBoundsWarning, stacklevel=2) # Check units. + if sigma is not None and sigma.units.is_unknown(): + # Be graceful, and promote unknown to dimensionless units. + sigma.units = cf_units.Unit("1") + if sigma is not None and not sigma.units.is_dimensionless(): - msg = 'Invalid units: sigma coordinate {!r} ' \ - 'must be dimensionless.'.format(sigma.name()) + msg = "Invalid units: sigma coordinate {!r} must be dimensionless.".format( + sigma.name() + ) raise ValueError(msg) - coords = ((eta, 'eta'), (depth, 'depth')) + coords = ((eta, "eta"), (depth, "depth")) for coord, term in coords: if coord is not None and coord.units != depth.units: - msg = 'Incompatible units: {} coordinate {!r} and depth ' \ - 'coordinate {!r} must have ' \ - 'the same units.'.format(term, coord.name(), depth.name()) + msg = ( + "Incompatible units: {} coordinate {!r} and depth " + "coordinate {!r} must have " + "the same units.".format(term, coord.name(), depth.name()) + ) raise ValueError(msg) @property def dependencies(self): - """ + """Return a dict mapping from constructor arg names to coordinates. + Returns a dictionary mapping from constructor argument names to the corresponding coordinates. @@ -1031,12 +1243,11 @@ def _derive(self, sigma, eta, depth): return eta + sigma * (depth + eta) def make_coord(self, coord_dims_func): - """ - Returns a new :class:`iris.coords.AuxCoord` as defined by this factory. - - Args: + """Return a new :class:`iris.coords.AuxCoord` as defined by this factory. - * coord_dims_func: + Parameters + ---------- + coord_dims_func : A callable which can return the list of dimensions relevant to a given coordinate. See :meth:`iris.cube.Cube.coord_dims()`. @@ -1047,93 +1258,77 @@ def make_coord(self, coord_dims_func): # Build the points array. 
nd_points_by_key = self._remap(dependency_dims, derived_dims) - points = self._derive(nd_points_by_key['sigma'], - nd_points_by_key['eta'], - nd_points_by_key['depth']) + points = self._derive( + nd_points_by_key["sigma"], + nd_points_by_key["eta"], + nd_points_by_key["depth"], + ) bounds = None if self.sigma and self.sigma.nbounds: # Build the bounds array. - nd_values_by_key = self._remap_with_bounds(dependency_dims, - derived_dims) + nd_values_by_key = self._remap_with_bounds(dependency_dims, derived_dims) valid_shapes = [(), (1,), (2,)] - key = 'sigma' + key = "sigma" if nd_values_by_key[key].shape[-1:] not in valid_shapes: name = self.dependencies[key].name() - msg = 'Invalid bounds for {} ' \ - 'coordinate {!r}.'.format(key, name) + msg = "Invalid bounds for {} coordinate {!r}.".format(key, name) raise ValueError(msg) valid_shapes.pop() - for key in ('eta', 'depth'): + for key in ("eta", "depth"): if nd_values_by_key[key].shape[-1:] not in valid_shapes: name = self.dependencies[key].name() - msg = 'The {} coordinate {!r} has bounds. ' \ - 'These are being disregarded.'.format(key, name) - warnings.warn(msg, UserWarning, stacklevel=2) + msg = ( + "The {} coordinate {!r} has bounds. " + "These are being disregarded.".format(key, name) + ) + warnings.warn(msg, category=IrisIgnoringBoundsWarning, stacklevel=2) # Swap bounds with points. bds_shape = list(nd_points_by_key[key].shape) + [1] bounds = nd_points_by_key[key].reshape(bds_shape) nd_values_by_key[key] = bounds - bounds = self._derive(nd_values_by_key['sigma'], - nd_values_by_key['eta'], - nd_values_by_key['depth']) - - coord = iris.coords.AuxCoord(points, - standard_name=self.standard_name, - long_name=self.long_name, - var_name=self.var_name, - units=self.units, - bounds=bounds, - attributes=self.attributes, - coord_system=self.coord_system) + bounds = self._derive( + nd_values_by_key["sigma"], + nd_values_by_key["eta"], + nd_values_by_key["depth"], + ) + + coord = iris.coords.AuxCoord( + points, + standard_name=self.standard_name, + long_name=self.long_name, + var_name=self.var_name, + units=self.units, + bounds=bounds, + attributes=self.attributes, + coord_system=self.coord_system, + ) return coord - def update(self, old_coord, new_coord=None): - """ - Notifies the factory of the removal/replacement of a coordinate - which might be a dependency. - - Args: - - * old_coord: - The coordinate to be removed/replaced. - * new_coord: - If None, any dependency using old_coord is removed, otherwise - any dependency using old_coord is updated to use new_coord. - - """ - new_dependencies = self.dependencies - for name, coord in self.dependencies.items(): - if old_coord is coord: - new_dependencies[name] = new_coord - try: - self._check_dependencies(**new_dependencies) - except ValueError as e: - msg = 'Failed to update dependencies. ' + str(e) - raise ValueError(msg) - else: - setattr(self, name, new_coord) - break - class OceanSg1Factory(AuxCoordFactory): """Defines an Ocean s-coordinate, generic form 1 factory.""" def __init__(self, s=None, c=None, eta=None, depth=None, depth_c=None): - """ - Creates an Ocean s-coordinate, generic form 1 factory with the formula: + """Create an Ocean s-coordinate, generic form 1 factory with the following formula. - z(n,k,j,i) = S(k,j,i) + eta(n,j,i) * (1 + S(k,j,i) / depth(j,i)) + .. math:: + z(n,k,j,i) = S(k,j,i) + eta(n,j,i) * (1 + S(k,j,i) / depth(j,i)) where: + + .. 
math:: S(k,j,i) = depth_c * s(k) + (depth(j,i) - depth_c) * C(k) """ - super(OceanSg1Factory, self).__init__() + # Configure the metadata manager. + self._metadata_manager = metadata_manager_factory(CoordMetadata) + super().__init__() # Check that provided coordinates meet necessary conditions. self._check_dependencies(s, c, eta, depth, depth_c) + self.units = depth.units self.s = s self.c = c @@ -1141,84 +1336,101 @@ def __init__(self, s=None, c=None, eta=None, depth=None, depth_c=None): self.depth = depth self.depth_c = depth_c - self.standard_name = 'sea_surface_height_above_reference_ellipsoid' - self.attributes = {'positive': 'up'} - - @property - def units(self): - return self.depth.units + self.standard_name = "sea_surface_height_above_reference_ellipsoid" + self.attributes = {"positive": "up"} @staticmethod def _check_dependencies(s, c, eta, depth, depth_c): # Check for sufficient factory coordinates. - if (eta is None or s is None or c is None or - depth is None or depth_c is None): - msg = 'Unable to construct Ocean s-coordinate, generic form 1 ' \ - 'factory due to insufficient source coordinates.' + if eta is None or s is None or c is None or depth is None or depth_c is None: + msg = ( + "Unable to construct Ocean s-coordinate, generic form 1 " + "factory due to insufficient source coordinates." + ) raise ValueError(msg) # Check bounds and shape. - coords = ((s, 's'), (c, 'c')) + coords = ((s, "s"), (c, "c")) for coord, term in coords: if coord is not None and coord.nbounds not in (0, 2): - msg = 'Invalid {} coordinate {!r}: must have either ' \ - '0 or 2 bounds.'.format(term, coord.name()) + msg = ( + "Invalid {} coordinate {!r}: must have either " + "0 or 2 bounds.".format(term, coord.name()) + ) raise ValueError(msg) if s and s.nbounds != c.nbounds: - msg = 'The s coordinate {!r} and c coordinate {!r} ' \ - 'must be equally bounded.'.format(s.name(), c.name()) + msg = ( + "The s coordinate {!r} and c coordinate {!r} " + "must be equally bounded.".format(s.name(), c.name()) + ) raise ValueError(msg) - coords = ((eta, 'eta'), (depth, 'depth')) + coords = ((eta, "eta"), (depth, "depth")) for coord, term in coords: if coord is not None and coord.nbounds: - msg = 'The {} coordinate {!r} has bounds. ' \ - 'These are being disregarded.'.format(term, coord.name()) - warnings.warn(msg, UserWarning, stacklevel=2) + msg = ( + "The {} coordinate {!r} has bounds. " + "These are being disregarded.".format(term, coord.name()) + ) + warnings.warn(msg, category=IrisIgnoringBoundsWarning, stacklevel=2) if depth_c is not None and depth_c.shape != (1,): - msg = 'Expected scalar {} coordinate {!r}: ' \ - 'got shape {!r}.'.format(term, coord.name(), coord.shape) + msg = "Expected scalar {} coordinate {!r}: got shape {!r}.".format( + term, coord.name(), coord.shape + ) raise ValueError(msg) # Check units. - coords = ((s, 's'), (c, 'c')) + coords = ((s, "s"), (c, "c")) for coord, term in coords: + if coord is not None and coord.units.is_unknown(): + # Be graceful, and promote unknown to dimensionless units. 
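+        # A numeric sketch of the generic form 1 derivation above (all values
+        # hypothetical, plain NumPy):
+        #
+        #     import numpy as np
+        #     s, c = np.array([-0.1, -0.9]), np.array([-0.05, -0.95])
+        #     eta, depth, depth_c = 0.5, 200.0, 50.0
+        #     S = depth_c * s + (depth - depth_c) * c
+        #     z = S + eta * (1 + S / depth)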
+ coord.units = cf_units.Unit("1") + if coord is not None and not coord.units.is_dimensionless(): - msg = 'Invalid units: {} coordinate {!r} ' \ - 'must be dimensionless.'.format(term, coord.name()) + msg = ( + "Invalid units: {} coordinate {!r} " + "must be dimensionless.".format(term, coord.name()) + ) raise ValueError(msg) - coords = ((eta, 'eta'), (depth, 'depth'), (depth_c, 'depth_c')) + coords = ((eta, "eta"), (depth, "depth"), (depth_c, "depth_c")) for coord, term in coords: if coord is not None and coord.units != depth.units: - msg = 'Incompatible units: {} coordinate {!r} and depth ' \ - 'coordinate {!r} must have ' \ - 'the same units.'.format(term, coord.name(), depth.name()) + msg = ( + "Incompatible units: {} coordinate {!r} and depth " + "coordinate {!r} must have " + "the same units.".format(term, coord.name(), depth.name()) + ) raise ValueError(msg) @property def dependencies(self): - """ - Returns a dictionary mapping from constructor argument names to + """Return a dict mapping from constructor arg names to coordinates. + + Return a dictionary mapping from constructor argument names to the corresponding coordinates. """ - return dict(s=self.s, c=self.c, eta=self.eta, depth=self.depth, - depth_c=self.depth_c) + return dict( + s=self.s, + c=self.c, + eta=self.eta, + depth=self.depth, + depth_c=self.depth_c, + ) def _derive(self, s, c, eta, depth, depth_c): S = depth_c * s + (depth - depth_c) * c return S + eta * (1 + S / depth) def make_coord(self, coord_dims_func): - """ - Returns a new :class:`iris.coords.AuxCoord` as defined by this factory. - - Args: + """Return a new :class:`iris.coords.AuxCoord` as defined by this factory. - * coord_dims_func: + Parameters + ---------- + coord_dims_func : A callable which can return the list of dimensions relevant to a given coordinate. See :meth:`iris.cube.Cube.coord_dims()`. @@ -1229,100 +1441,85 @@ def make_coord(self, coord_dims_func): # Build the points array. nd_points_by_key = self._remap(dependency_dims, derived_dims) - points = self._derive(nd_points_by_key['s'], - nd_points_by_key['c'], - nd_points_by_key['eta'], - nd_points_by_key['depth'], - nd_points_by_key['depth_c']) + points = self._derive( + nd_points_by_key["s"], + nd_points_by_key["c"], + nd_points_by_key["eta"], + nd_points_by_key["depth"], + nd_points_by_key["depth_c"], + ) bounds = None if self.s.nbounds or (self.c and self.c.nbounds): # Build the bounds array. - nd_values_by_key = self._remap_with_bounds(dependency_dims, - derived_dims) + nd_values_by_key = self._remap_with_bounds(dependency_dims, derived_dims) valid_shapes = [(), (1,), (2,)] - key = 's' + key = "s" if nd_values_by_key[key].shape[-1:] not in valid_shapes: name = self.dependencies[key].name() - msg = 'Invalid bounds for {} ' \ - 'coordinate {!r}.'.format(key, name) + msg = "Invalid bounds for {} coordinate {!r}.".format(key, name) raise ValueError(msg) valid_shapes.pop() - for key in ('eta', 'depth', 'depth_c'): + for key in ("eta", "depth", "depth_c"): if nd_values_by_key[key].shape[-1:] not in valid_shapes: name = self.dependencies[key].name() - msg = 'The {} coordinate {!r} has bounds. ' \ - 'These are being disregarded.'.format(key, name) - warnings.warn(msg, UserWarning, stacklevel=2) + msg = ( + "The {} coordinate {!r} has bounds. " + "These are being disregarded.".format(key, name) + ) + warnings.warn(msg, category=IrisIgnoringBoundsWarning, stacklevel=2) # Swap bounds with points. 
bds_shape = list(nd_points_by_key[key].shape) + [1] bounds = nd_points_by_key[key].reshape(bds_shape) nd_values_by_key[key] = bounds - bounds = self._derive(nd_values_by_key['s'], - nd_values_by_key['c'], - nd_values_by_key['eta'], - nd_values_by_key['depth'], - nd_values_by_key['depth_c']) - - coord = iris.coords.AuxCoord(points, - standard_name=self.standard_name, - long_name=self.long_name, - var_name=self.var_name, - units=self.units, - bounds=bounds, - attributes=self.attributes, - coord_system=self.coord_system) + bounds = self._derive( + nd_values_by_key["s"], + nd_values_by_key["c"], + nd_values_by_key["eta"], + nd_values_by_key["depth"], + nd_values_by_key["depth_c"], + ) + + coord = iris.coords.AuxCoord( + points, + standard_name=self.standard_name, + long_name=self.long_name, + var_name=self.var_name, + units=self.units, + bounds=bounds, + attributes=self.attributes, + coord_system=self.coord_system, + ) return coord - def update(self, old_coord, new_coord=None): - """ - Notifies the factory of the removal/replacement of a coordinate - which might be a dependency. - - Args: - - * old_coord: - The coordinate to be removed/replaced. - * new_coord: - If None, any dependency using old_coord is removed, otherwise - any dependency using old_coord is updated to use new_coord. - - """ - new_dependencies = self.dependencies - for name, coord in self.dependencies.items(): - if old_coord is coord: - new_dependencies[name] = new_coord - try: - self._check_dependencies(**new_dependencies) - except ValueError as e: - msg = 'Failed to update dependencies. ' + str(e) - raise ValueError(msg) - else: - setattr(self, name, new_coord) - break - class OceanSFactory(AuxCoordFactory): """Defines an Ocean s-coordinate factory.""" - def __init__(self, s=None, eta=None, depth=None, a=None, b=None, - depth_c=None): - """ - Creates an Ocean s-coordinate factory with the formula: + def __init__(self, s=None, eta=None, depth=None, a=None, b=None, depth_c=None): + """Create an Ocean s-coordinate factory with a formula. + + .. math:: - z(n,k,j,i) = eta(n,j,i)*(1+s(k)) + depth_c*s(k) + - (depth(j,i)-depth_c)*C(k) + z(n,k,j,i) = eta(n,j,i)*(1+s(k)) + depth_c*s(k) + + (depth(j,i)-depth_c)*C(k) where: + + .. math:: + C(k) = (1-b) * sinh(a*s(k)) / sinh(a) + b * [tanh(a * (s(k) + 0.5)) / (2 * tanh(0.5*a)) - 0.5] """ - super(OceanSFactory, self).__init__() + # Configure the metadata manager. + self._metadata_manager = metadata_manager_factory(CoordMetadata) + super().__init__() # Check that provided coordinates meet necessary conditions. self._check_dependencies(s, eta, depth, a, b, depth_c) + self.units = depth.units self.s = s self.eta = eta @@ -1331,78 +1528,100 @@ def __init__(self, s=None, eta=None, depth=None, a=None, b=None, self.b = b self.depth_c = depth_c - self.standard_name = 'sea_surface_height_above_reference_ellipsoid' - self.attributes = {'positive': 'up'} - - @property - def units(self): - return self.depth.units + self.standard_name = "sea_surface_height_above_reference_ellipsoid" + self.attributes = {"positive": "up"} @staticmethod def _check_dependencies(s, eta, depth, a, b, depth_c): # Check for sufficient factory coordinates. - if (eta is None or s is None or depth is None or - a is None or b is None or depth_c is None): - msg = 'Unable to construct Ocean s-coordinate ' \ - 'factory due to insufficient source coordinates.' 
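+        # A sketch of the stretching function C(k) documented above, with
+        # illustrative parameters (plain NumPy, mirroring the `_derive` maths):
+        #
+        #     import numpy as np
+        #     s, a, b = np.linspace(-1.0, 0.0, 5), 3.0, 0.5
+        #     c = (1 - b) * np.sinh(a * s) / np.sinh(a) + b * (
+        #         np.tanh(a * (s + 0.5)) / (2 * np.tanh(0.5 * a)) - 0.5
+        #     )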
+ if ( + eta is None + or s is None + or depth is None + or a is None + or b is None + or depth_c is None + ): + msg = ( + "Unable to construct Ocean s-coordinate " + "factory due to insufficient source coordinates." + ) raise ValueError(msg) # Check bounds and shape. if s is not None and s.nbounds not in (0, 2): - msg = 'Invalid s coordinate {!r}: must have either ' \ - '0 or 2 bounds.'.format(s.name()) + msg = "Invalid s coordinate {!r}: must have either 0 or 2 bounds.".format( + s.name() + ) raise ValueError(msg) - coords = ((eta, 'eta'), (depth, 'depth')) + coords = ((eta, "eta"), (depth, "depth")) for coord, term in coords: if coord is not None and coord.nbounds: - msg = 'The {} coordinate {!r} has bounds. ' \ - 'These are being disregarded.'.format(term, coord.name()) - warnings.warn(msg, UserWarning, stacklevel=2) + msg = ( + "The {} coordinate {!r} has bounds. " + "These are being disregarded.".format(term, coord.name()) + ) + warnings.warn(msg, category=IrisIgnoringBoundsWarning, stacklevel=2) - coords = ((a, 'a'), (b, 'b'), (depth_c, 'depth_c')) + coords = ((a, "a"), (b, "b"), (depth_c, "depth_c")) for coord, term in coords: if coord is not None and coord.shape != (1,): - msg = 'Expected scalar {} coordinate {!r}: ' \ - 'got shape {!r}.'.format(term, coord.name(), coord.shape) + msg = "Expected scalar {} coordinate {!r}: got shape {!r}.".format( + term, coord.name(), coord.shape + ) raise ValueError(msg) # Check units. + if s is not None and s.units.is_unknown(): + # Be graceful, and promote unknown to dimensionless units. + s.units = cf_units.Unit("1") + if s is not None and not s.units.is_dimensionless(): - msg = 'Invalid units: s coordinate {!r} ' \ - 'must be dimensionless.'.format(s.name()) + msg = "Invalid units: s coordinate {!r} must be dimensionless.".format( + s.name() + ) raise ValueError(msg) - coords = ((eta, 'eta'), (depth, 'depth'), (depth_c, 'depth_c')) + coords = ((eta, "eta"), (depth, "depth"), (depth_c, "depth_c")) for coord, term in coords: if coord is not None and coord.units != depth.units: - msg = 'Incompatible units: {} coordinate {!r} and depth ' \ - 'coordinate {!r} must have ' \ - 'the same units.'.format(term, coord.name(), depth.name()) + msg = ( + "Incompatible units: {} coordinate {!r} and depth " + "coordinate {!r} must have " + "the same units.".format(term, coord.name(), depth.name()) + ) raise ValueError(msg) @property def dependencies(self): - """ - Returns a dictionary mapping from constructor argument names to + """Return a dict mapping from constructor arg names to coordinates. + + Return a dictionary mapping from constructor argument names to the corresponding coordinates. """ - return dict(s=self.s, eta=self.eta, depth=self.depth, a=self.a, - b=self.b, depth_c=self.depth_c) + return dict( + s=self.s, + eta=self.eta, + depth=self.depth, + a=self.a, + b=self.b, + depth_c=self.depth_c, + ) def _derive(self, s, eta, depth, a, b, depth_c): - c = ((1 - b) * da.sinh(a * s) / da.sinh(a) + b * - (da.tanh(a * (s + 0.5)) / (2 * da.tanh(0.5 * a)) - 0.5)) + c = (1 - b) * da.sinh(a * s) / da.sinh(a) + b * ( + da.tanh(a * (s + 0.5)) / (2 * da.tanh(0.5 * a)) - 0.5 + ) return eta * (1 + s) + depth_c * s + (depth - depth_c) * c def make_coord(self, coord_dims_func): - """ - Returns a new :class:`iris.coords.AuxCoord` as defined by this factory. - - Args: + """Return a new :class:`iris.coords.AuxCoord` as defined by this factory. 
- * coord_dims_func: + Parameters + ---------- + coord_dims_func : A callable which can return the list of dimensions relevant to a given coordinate. See :meth:`iris.cube.Cube.coord_dims()`. @@ -1413,100 +1632,84 @@ def make_coord(self, coord_dims_func): # Build the points array. nd_points_by_key = self._remap(dependency_dims, derived_dims) - points = self._derive(nd_points_by_key['s'], - nd_points_by_key['eta'], - nd_points_by_key['depth'], - nd_points_by_key['a'], - nd_points_by_key['b'], - nd_points_by_key['depth_c']) + points = self._derive( + nd_points_by_key["s"], + nd_points_by_key["eta"], + nd_points_by_key["depth"], + nd_points_by_key["a"], + nd_points_by_key["b"], + nd_points_by_key["depth_c"], + ) bounds = None if self.s.nbounds: # Build the bounds array. - nd_values_by_key = self._remap_with_bounds(dependency_dims, - derived_dims) + nd_values_by_key = self._remap_with_bounds(dependency_dims, derived_dims) valid_shapes = [(), (1,), (2,)] - key = 's' + key = "s" if nd_values_by_key[key].shape[-1:] not in valid_shapes: name = self.dependencies[key].name() - msg = 'Invalid bounds for {} ' \ - 'coordinate {!r}.'.format(key, name) + msg = "Invalid bounds for {} coordinate {!r}.".format(key, name) raise ValueError(msg) valid_shapes.pop() - for key in ('eta', 'depth', 'a', 'b', 'depth_c'): + for key in ("eta", "depth", "a", "b", "depth_c"): if nd_values_by_key[key].shape[-1:] not in valid_shapes: name = self.dependencies[key].name() - msg = 'The {} coordinate {!r} has bounds. ' \ - 'These are being disregarded.'.format(key, name) - warnings.warn(msg, UserWarning, stacklevel=2) + msg = ( + "The {} coordinate {!r} has bounds. " + "These are being disregarded.".format(key, name) + ) + warnings.warn(msg, category=IrisIgnoringBoundsWarning, stacklevel=2) # Swap bounds with points. bds_shape = list(nd_points_by_key[key].shape) + [1] bounds = nd_points_by_key[key].reshape(bds_shape) nd_values_by_key[key] = bounds - bounds = self._derive(nd_values_by_key['s'], - nd_values_by_key['eta'], - nd_values_by_key['depth'], - nd_values_by_key['a'], - nd_values_by_key['b'], - nd_values_by_key['depth_c']) - - coord = iris.coords.AuxCoord(points, - standard_name=self.standard_name, - long_name=self.long_name, - var_name=self.var_name, - units=self.units, - bounds=bounds, - attributes=self.attributes, - coord_system=self.coord_system) + bounds = self._derive( + nd_values_by_key["s"], + nd_values_by_key["eta"], + nd_values_by_key["depth"], + nd_values_by_key["a"], + nd_values_by_key["b"], + nd_values_by_key["depth_c"], + ) + + coord = iris.coords.AuxCoord( + points, + standard_name=self.standard_name, + long_name=self.long_name, + var_name=self.var_name, + units=self.units, + bounds=bounds, + attributes=self.attributes, + coord_system=self.coord_system, + ) return coord - def update(self, old_coord, new_coord=None): - """ - Notifies the factory of the removal/replacement of a coordinate - which might be a dependency. - - Args: - - * old_coord: - The coordinate to be removed/replaced. - * new_coord: - If None, any dependency using old_coord is removed, otherwise - any dependency using old_coord is updated to use new_coord. - - """ - new_dependencies = self.dependencies - for name, coord in self.dependencies.items(): - if old_coord is coord: - new_dependencies[name] = new_coord - try: - self._check_dependencies(**new_dependencies) - except ValueError as e: - msg = 'Failed to update dependencies. 
' + str(e) - raise ValueError(msg) - else: - setattr(self, name, new_coord) - break - class OceanSg2Factory(AuxCoordFactory): """Defines an Ocean s-coordinate, generic form 2 factory.""" def __init__(self, s=None, c=None, eta=None, depth=None, depth_c=None): - """ - Creates an Ocean s-coordinate, generic form 2 factory with the formula: + """Create an Ocean s-coordinate, generic form 2 factory with the following formula. - z(n,k,j,i) = eta(n,j,i) + (eta(n,j,i) + depth(j,i)) * S(k,j,i) + .. math:: + z(n,k,j,i) = eta(n,j,i) + (eta(n,j,i) + depth(j,i)) * S(k,j,i) where: + + .. math:: S(k,j,i) = (depth_c * s(k) + depth(j,i) * C(k)) / (depth_c + depth(j,i)) """ - super(OceanSg2Factory, self).__init__() + # Configure the metadata manager. + self._metadata_manager = metadata_manager_factory(CoordMetadata) + super().__init__() # Check that provided coordinates meet necessary conditions. self._check_dependencies(s, c, eta, depth, depth_c) + self.units = depth.units self.s = s self.c = c @@ -1514,84 +1717,101 @@ def __init__(self, s=None, c=None, eta=None, depth=None, depth_c=None): self.depth = depth self.depth_c = depth_c - self.standard_name = 'sea_surface_height_above_reference_ellipsoid' - self.attributes = {'positive': 'up'} - - @property - def units(self): - return self.depth.units + self.standard_name = "sea_surface_height_above_reference_ellipsoid" + self.attributes = {"positive": "up"} @staticmethod def _check_dependencies(s, c, eta, depth, depth_c): # Check for sufficient factory coordinates. - if (eta is None or s is None or c is None or - depth is None or depth_c is None): - msg = 'Unable to construct Ocean s-coordinate, generic form 2 ' \ - 'factory due to insufficient source coordinates.' + if eta is None or s is None or c is None or depth is None or depth_c is None: + msg = ( + "Unable to construct Ocean s-coordinate, generic form 2 " + "factory due to insufficient source coordinates." + ) raise ValueError(msg) # Check bounds and shape. - coords = ((s, 's'), (c, 'c')) + coords = ((s, "s"), (c, "c")) for coord, term in coords: if coord is not None and coord.nbounds not in (0, 2): - msg = 'Invalid {} coordinate {!r}: must have either ' \ - '0 or 2 bounds.'.format(term, coord.name()) + msg = ( + "Invalid {} coordinate {!r}: must have either " + "0 or 2 bounds.".format(term, coord.name()) + ) raise ValueError(msg) if s and s.nbounds != c.nbounds: - msg = 'The s coordinate {!r} and c coordinate {!r} ' \ - 'must be equally bounded.'.format(s.name(), c.name()) + msg = ( + "The s coordinate {!r} and c coordinate {!r} " + "must be equally bounded.".format(s.name(), c.name()) + ) raise ValueError(msg) - coords = ((eta, 'eta'), (depth, 'depth')) + coords = ((eta, "eta"), (depth, "depth")) for coord, term in coords: if coord is not None and coord.nbounds: - msg = 'The {} coordinate {!r} has bounds. ' \ - 'These are being disregarded.'.format(term, coord.name()) - warnings.warn(msg, UserWarning, stacklevel=2) + msg = ( + "The {} coordinate {!r} has bounds. " + "These are being disregarded.".format(term, coord.name()) + ) + warnings.warn(msg, category=IrisIgnoringBoundsWarning, stacklevel=2) if depth_c is not None and depth_c.shape != (1,): - msg = 'Expected scalar depth_c coordinate {!r}: ' \ - 'got shape {!r}.'.format(depth_c.name(), depth_c.shape) + msg = "Expected scalar depth_c coordinate {!r}: got shape {!r}.".format( + depth_c.name(), depth_c.shape + ) raise ValueError(msg) # Check units. 
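+        # A numeric sketch of the generic form 2 derivation above (all values
+        # hypothetical, plain NumPy):
+        #
+        #     import numpy as np
+        #     s, c = np.array([-0.2, -0.8]), np.array([-0.1, -0.9])
+        #     eta, depth, depth_c = 0.4, 150.0, 30.0
+        #     S = (depth_c * s + depth * c) / (depth_c + depth)
+        #     z = eta + (eta + depth) * S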
- coords = ((s, 's'), (c, 'c')) + coords = ((s, "s"), (c, "c")) for coord, term in coords: + if coord is not None and coord.units.is_unknown(): + # Be graceful, and promote unknown to dimensionless units. + coord.units = cf_units.Unit("1") + if coord is not None and not coord.units.is_dimensionless(): - msg = 'Invalid units: {} coordinate {!r} ' \ - 'must be dimensionless.'.format(term, coord.name()) + msg = ( + "Invalid units: {} coordinate {!r} " + "must be dimensionless.".format(term, coord.name()) + ) raise ValueError(msg) - coords = ((eta, 'eta'), (depth, 'depth'), (depth_c, 'depth_c')) + coords = ((eta, "eta"), (depth, "depth"), (depth_c, "depth_c")) for coord, term in coords: if coord is not None and coord.units != depth.units: - msg = 'Incompatible units: {} coordinate {!r} and depth ' \ - 'coordinate {!r} must have ' \ - 'the same units.'.format(term, coord.name(), depth.name()) + msg = ( + "Incompatible units: {} coordinate {!r} and depth " + "coordinate {!r} must have " + "the same units.".format(term, coord.name(), depth.name()) + ) raise ValueError(msg) @property def dependencies(self): - """ - Returns a dictionary mapping from constructor argument names to + """Return a dict mapping from constructor arg names to coordinates. + + Return a dictionary mapping from constructor argument names to the corresponding coordinates. """ - return dict(s=self.s, c=self.c, eta=self.eta, depth=self.depth, - depth_c=self.depth_c) + return dict( + s=self.s, + c=self.c, + eta=self.eta, + depth=self.depth, + depth_c=self.depth_c, + ) def _derive(self, s, c, eta, depth, depth_c): S = (depth_c * s + depth * c) / (depth_c + depth) return eta + (eta + depth) * S def make_coord(self, coord_dims_func): - """ - Returns a new :class:`iris.coords.AuxCoord` as defined by this factory. - - Args: + """Return a new :class:`iris.coords.AuxCoord` as defined by this factory. - * coord_dims_func: + Parameters + ---------- + coord_dims_func : A callable which can return the list of dimensions relevant to a given coordinate. See :meth:`iris.cube.Cube.coord_dims()`. @@ -1602,75 +1822,54 @@ def make_coord(self, coord_dims_func): # Build the points array. nd_points_by_key = self._remap(dependency_dims, derived_dims) - points = self._derive(nd_points_by_key['s'], - nd_points_by_key['c'], - nd_points_by_key['eta'], - nd_points_by_key['depth'], - nd_points_by_key['depth_c']) + points = self._derive( + nd_points_by_key["s"], + nd_points_by_key["c"], + nd_points_by_key["eta"], + nd_points_by_key["depth"], + nd_points_by_key["depth_c"], + ) bounds = None if self.s.nbounds or (self.c and self.c.nbounds): # Build the bounds array. - nd_values_by_key = self._remap_with_bounds(dependency_dims, - derived_dims) + nd_values_by_key = self._remap_with_bounds(dependency_dims, derived_dims) valid_shapes = [(), (1,), (2,)] - key = 's' + key = "s" if nd_values_by_key[key].shape[-1:] not in valid_shapes: name = self.dependencies[key].name() - msg = 'Invalid bounds for {} ' \ - 'coordinate {!r}.'.format(key, name) + msg = "Invalid bounds for {} coordinate {!r}.".format(key, name) raise ValueError(msg) valid_shapes.pop() - for key in ('eta', 'depth', 'depth_c'): + for key in ("eta", "depth", "depth_c"): if nd_values_by_key[key].shape[-1:] not in valid_shapes: name = self.dependencies[key].name() - msg = 'The {} coordinate {!r} has bounds. ' \ - 'These are being disregarded.'.format(key, name) - warnings.warn(msg, UserWarning, stacklevel=2) + msg = ( + "The {} coordinate {!r} has bounds. 
" + "These are being disregarded.".format(key, name) + ) + warnings.warn(msg, category=IrisIgnoringBoundsWarning, stacklevel=2) # Swap bounds with points. bds_shape = list(nd_points_by_key[key].shape) + [1] bounds = nd_points_by_key[key].reshape(bds_shape) nd_values_by_key[key] = bounds - bounds = self._derive(nd_values_by_key['s'], - nd_values_by_key['c'], - nd_values_by_key['eta'], - nd_values_by_key['depth'], - nd_values_by_key['depth_c']) - - coord = iris.coords.AuxCoord(points, - standard_name=self.standard_name, - long_name=self.long_name, - var_name=self.var_name, - units=self.units, - bounds=bounds, - attributes=self.attributes, - coord_system=self.coord_system) + bounds = self._derive( + nd_values_by_key["s"], + nd_values_by_key["c"], + nd_values_by_key["eta"], + nd_values_by_key["depth"], + nd_values_by_key["depth_c"], + ) + + coord = iris.coords.AuxCoord( + points, + standard_name=self.standard_name, + long_name=self.long_name, + var_name=self.var_name, + units=self.units, + bounds=bounds, + attributes=self.attributes, + coord_system=self.coord_system, + ) return coord - - def update(self, old_coord, new_coord=None): - """ - Notifies the factory of the removal/replacement of a coordinate - which might be a dependency. - - Args: - - * old_coord: - The coordinate to be removed/replaced. - * new_coord: - If None, any dependency using old_coord is removed, otherwise - any dependency using old_coord is updated to use new_coord. - - """ - new_dependencies = self.dependencies - for name, coord in self.dependencies.items(): - if old_coord is coord: - new_dependencies[name] = new_coord - try: - self._check_dependencies(**new_dependencies) - except ValueError as e: - msg = 'Failed to update dependencies. ' + str(e) - raise ValueError(msg) - else: - setattr(self, name, new_coord) - break diff --git a/lib/iris/common/__init__.py b/lib/iris/common/__init__.py new file mode 100644 index 0000000000..f9ad2bf207 --- /dev/null +++ b/lib/iris/common/__init__.py @@ -0,0 +1,10 @@ +# Copyright Iris contributors +# +# This file is part of Iris and is released under the BSD license. +# See LICENSE in the root of the repository for full licensing details. +"""A package for provisioning common Iris infrastructure.""" + +from .lenient import * +from .metadata import * +from .mixin import * +from .resolve import * diff --git a/lib/iris/common/_split_attribute_dicts.py b/lib/iris/common/_split_attribute_dicts.py new file mode 100644 index 0000000000..3e9c74cea9 --- /dev/null +++ b/lib/iris/common/_split_attribute_dicts.py @@ -0,0 +1,124 @@ +# Copyright Iris contributors +# +# This file is part of Iris and is released under the BSD license. +# See LICENSE in the root of the repository for full licensing details. +"""Dictionary operations for dealing with the CubeAttrsDict "split"-style attribute dictionaries. + +The idea here is to convert a split-dictionary into a "plain" one for calculations, +whose keys are all pairs of the form ('global', ) or ('local', ). +And to convert back again after the operation, if the result is a dictionary. + +For "strict" operations this clearly does all that is needed. For lenient ones, +we _might_ want for local+global attributes of the same name to interact. +However, on careful consideration, it seems that this is not actually desirable for +any of the common-metadata operations. +So, we simply treat "global" and "local" attributes of the same name as entirely +independent. Which happily is also the easiest to code, and to explain. 
+""" + +from collections.abc import Mapping, Sequence +from functools import wraps + + +def _convert_splitattrs_to_pairedkeys_dict(dic): + """Convert a split-attributes dictionary to a "normal" dict. + + Transform a :class:`~iris.cube.CubeAttributesDict` "split" attributes dictionary + into a 'normal' :class:`dict`, with paired keys of the form ('global', name) or + ('local', name). + + If the input is *not* a split-attrs dict, it is converted to one before + transforming it. This will assign its keys to global/local depending on a standard + set of choices (see :class:`~iris.cube.CubeAttributesDict`). + """ + from iris.cube import CubeAttrsDict + + # Convert input to CubeAttrsDict + if not hasattr(dic, "globals") or not hasattr(dic, "locals"): + dic = CubeAttrsDict(dic) + + def _global_then_local_items(dic): + # Routine to produce global, then local 'items' in order, and with all keys + # "labelled" as local or global type, to ensure they are all unique. + for key, value in dic.globals.items(): + yield ("global", key), value + for key, value in dic.locals.items(): + yield ("local", key), value + + return dict(_global_then_local_items(dic)) + + +def _convert_pairedkeys_dict_to_splitattrs(dic): + """Convert an input with global/local paired keys back into a split-attrs dict. + + For now, this is always and only a :class:`iris.cube.CubeAttrsDict`. + """ + from iris.cube import CubeAttrsDict + + result = CubeAttrsDict() + for key, value in dic.items(): + keytype, keyname = key + if keytype == "global": + result.globals[keyname] = value + else: + assert keytype == "local" + result.locals[keyname] = value + return result + + +def adjust_for_split_attribute_dictionaries(operation): + """Generate attribute-dictionaries to work with split attributes. + + Decorator to make a function of attribute-dictionaries work with split attributes. + + The wrapped function of attribute-dictionaries is currently always one of "equals", + "combine" or "difference", with signatures like : + equals(left: dict, right: dict) -> bool + combine(left: dict, right: dict) -> dict + difference(left: dict, right: dict) -> None | (dict, dict) + + The results of the wrapped operation are either : + * for "equals" (or "__eq__") : a boolean + * for "combine" : a (converted) attributes-dictionary + * for "difference" : a list of (None or "pair"), where a pair contains two + dictionaries + + Before calling the wrapped operation, its inputs (left, right) are modified by + converting any "split" dictionaries to a form where the keys are pairs + of the form ("global", name) or ("local", name). + + After calling the wrapped operation, for "combine" or "difference", the result can + contain a dictionary or dictionaries. These are then transformed back from the + 'converted' form to split-attribute dictionaries, before returning. + + "Split" dictionaries are all of class :class:`~iris.cube.CubeAttrsDict`, since + the only usage of 'split' attribute dictionaries is in Cubes (i.e. they are not + used for cube components). + + """ + + @wraps(operation) + def _inner_function(*args, **kwargs): + # Convert all inputs into 'pairedkeys' type dicts + args = [_convert_splitattrs_to_pairedkeys_dict(arg) for arg in args] + + result = operation(*args, **kwargs) + + # Convert known specific cases of 'pairedkeys' dicts in the result, and convert + # those back into split-attribute dictionaries. 
+ if isinstance(result, Mapping): + # Fix a result which is a single dictionary -- for "combine" + result = _convert_pairedkeys_dict_to_splitattrs(result) + elif isinstance(result, Sequence) and len(result) == 2: + # Fix a result which is a pair of dictionaries -- for "difference" + left, right = result + left, right = ( + _convert_pairedkeys_dict_to_splitattrs(left), + _convert_pairedkeys_dict_to_splitattrs(right), + ) + result = result.__class__([left, right]) + # ELSE: leave other types of result unchanged. E.G. None, bool + + return result + + return _inner_function diff --git a/lib/iris/common/lenient.py b/lib/iris/common/lenient.py new file mode 100644 index 0000000000..b26e0f1763 --- /dev/null +++ b/lib/iris/common/lenient.py @@ -0,0 +1,661 @@ +# Copyright Iris contributors +# +# This file is part of Iris and is released under the BSD license. +# See LICENSE in the root of the repository for full licensing details. +"""Provides the infrastructure to support lenient client/service behaviour.""" + +from collections.abc import Iterable +from contextlib import contextmanager +from copy import deepcopy +from functools import wraps +from inspect import getmodule +import threading + +__all__ = [ + "LENIENT", + "Lenient", +] + + +#: Default _Lenient services global activation state. +_LENIENT_ENABLE_DEFAULT = True + +#: Default Lenient maths feature state. +_LENIENT_MATHS_DEFAULT = True + +#: Protected _Lenient internal non-client, non-service keys. +_LENIENT_PROTECTED = ("active", "enable") + + +def _lenient_client(*dargs, services=None): + """Allow a client function/method to declare at runtime. + + Decorator that allows a client function/method to declare at runtime that + it is executing and requires lenient behaviour from a prior registered + lenient service function/method. + + This decorator supports being called with no arguments e.g:: + + @_lenient_client() + def func(): + pass + + This is equivalent to using it as a simple naked decorator e.g:: + + @_lenient_client + def func(): + pass + + Alternatively, this decorator supports the lenient client explicitly + declaring the lenient services that it wishes to use e.g:: + + @_lenient_client(services=(service1, service2, ...)) + def func(): + pass + + Parameters + ---------- + dargs : tuple of callable + A tuple containing the callable lenient client function/method to be + wrapped by the decorator. This is automatically populated by Python + through the decorator interface. No argument requires to be manually + provided. + services : callable or str or iterable of callable/str, optional + Zero or more function/methods, or equivalent fully qualified string names, of + lenient service function/methods. + + Returns + ------- + Closure wrapped function/method. + + """ + ndargs = len(dargs) + + if ndargs: + assert ( + ndargs == 1 + ), f"Invalid lenient client arguments, expecting 1 got {ndargs}." + assert callable( + dargs[0] + ), "Invalid lenient client argument, expecting a callable." + + assert not ( + ndargs and services + ), "Invalid lenient client, got both arguments and keyword arguments." + + if ndargs: + # The decorator has been used as a simple naked decorator. + (func,) = dargs + + @wraps(func) + def lenient_client_inner_naked(*args, **kwargs): + """Closure wrapper function. + + Closure wrapper function to register the wrapped function/method + as active at runtime before executing it. 
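+
+            For example (function names invented for illustration)::
+
+                @_lenient_service
+                def service():
+                    ...
+
+                @_lenient_client(services=(service,))
+                def client():
+                    # "service" may behave leniently while "client" is active.
+                    return service()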
+ + """ + with _LENIENT.context(active=_qualname(func)): + result = func(*args, **kwargs) + return result + + result = lenient_client_inner_naked + else: + # The decorator has been called with None, zero or more explicit lenient services. + if services is None: + services = () + + if isinstance(services, str) or not isinstance(services, Iterable): + services = (services,) + + def lenient_client_outer(func): + @wraps(func) + def lenient_client_inner(*args, **kwargs): + """Closure wrapper function. + + Closure wrapper function to register the wrapped function/method + as active at runtime before executing it. + + """ + with _LENIENT.context(*services, active=_qualname(func)): + result = func(*args, **kwargs) + return result + + return lenient_client_inner + + result = lenient_client_outer + + return result + + +def _lenient_service(*dargs): + """Implement the lenient service protocol. + + Decorator that allows a function/method to declare that it supports lenient + behaviour as a service. + + Registration is at Python interpreter parse time. + + The decorator supports being called with no arguments e.g:: + + @_lenient_service() + def func(): + pass + + This is equivalent to using it as a simple naked decorator e.g:: + + @_lenient_service + def func(): + pass + + Parameters + ---------- + dargs : tuple of callable + A tuple containing the callable lenient service function/method to be + wrapped by the decorator. This is automatically populated by Python + through the decorator interface. No argument requires to be manually + provided. + + Returns + ------- + Closure wrapped function/method. + + """ + ndargs = len(dargs) + + if ndargs: + assert ( + ndargs == 1 + ), f"Invalid lenient service arguments, expecting 1 got {ndargs}." + assert callable( + dargs[0] + ), "Invalid lenient service argument, expecting a callable." + + if ndargs: + # The decorator has been used as a simple naked decorator. + # Thus the (single) argument is a function to be wrapped. + # We just register the argument function as a lenient service, and + # return it unchanged + (func,) = dargs + + _LENIENT.register_service(func) + + # This decorator registers 'func': the func itself is unchanged. + result = func + + else: + # The decorator has been called with no arguments. + # Return a decorator, to apply to 'func' immediately following. + def lenient_service_outer(func): + _LENIENT.register_service(func) + + # Decorator registers 'func', but func itself is unchanged. + return func + + result = lenient_service_outer + + return result + + +def _qualname(func): + """Return the fully qualified function/method string name. + + Parameters + ---------- + func : callable + Callable function/method. Non-callable arguments are simply + passed through. + + Notes + ----- + .. note:: + Inherited methods will be qualified with the base class that + defines the method. + + """ + result = func + if callable(func): + module = getmodule(func) + result = f"{module.__name__}.{func.__qualname__}" + + return result + + +class Lenient(threading.local): + def __init__(self, **kwargs): + """Container for managing the run-time lenient features and options. + + Parameters + ---------- + **kwargs : dict, optional + Mapping of lenient key/value options to enable/disable. Note that, + only the lenient "maths" options is available, which controls + lenient/strict cube arithmetic. + + Examples + -------- + :: + + Lenient(maths=False) + + Note that, the values of these options are thread-specific. + + """ + # Configure the initial default lenient state. 
+ self._init() + + if not kwargs: + # If not specified, set the default behaviour of the maths lenient feature. + kwargs = dict(maths=_LENIENT_MATHS_DEFAULT) + + # Configure the provided (or default) lenient features. + for feature, state in kwargs.items(): + self[feature] = state + + def __contains__(self, key): + return key in self.__dict__ + + def __getitem__(self, key): + if key not in self.__dict__: + cls = self.__class__.__name__ + emsg = f"Invalid {cls!r} option, got {key!r}." + raise KeyError(emsg) + return self.__dict__[key] + + def __repr__(self): + cls = self.__class__.__name__ + msg = f"{cls}(maths={self.__dict__['maths']!r})" + return msg + + def __setitem__(self, key, value): + cls = self.__class__.__name__ + + if key not in self.__dict__: + emsg = f"Invalid {cls!r} option, got {key!r}." + raise KeyError(emsg) + + if not isinstance(value, bool): + emsg = f"Invalid {cls!r} option {key!r} value, got {value!r}." + raise ValueError(emsg) + + self.__dict__[key] = value + # Toggle the (private) lenient behaviour. + _LENIENT.enable = value + + def _init(self): + """Configure the initial default lenient state.""" + # This is the only public supported lenient feature i.e., cube arithmetic + self.__dict__["maths"] = None + + @contextmanager + def context(self, **kwargs): + """Context manager supporting temporary modification of lenient state. + + Return a context manager which allows temporary modification of the + lenient option state within the scope of the context manager. + + On entry to the context manager, all provided keyword arguments are + applied. On exit from the context manager, the previous lenient + option state is restored. + + For example:: + + with iris.common.Lenient.context(maths=False): + pass + + """ + + def configure_state(state): + for feature, value in state.items(): + self[feature] = value + + # Save the original state. + original_state = deepcopy(self.__dict__) + + # Configure the provided lenient features. + configure_state(kwargs) + + try: + yield + finally: + # Restore the original state. + self.__dict__.clear() + self._init() + configure_state(original_state) + + +############################################################################### + + +class _Lenient(threading.local): + def __init__(self, *args, **kwargs): + """Container for managing the run-time lenient services and client options. + + A container for managing the run-time lenient services and client + options for pre-defined functions/methods. + + Parameters + ---------- + *args : callable or str or iterable of callable/str + A function/method or fully qualified string name of the function/method + acting as a lenient service. + **kwargs : dict of callable/str or iterable of callable/str, optional + Mapping of lenient client function/method, or fully qualified string name + of the function/method, to one or more lenient service + function/methods or fully qualified string name of function/methods. + + Examples + -------- + :: + + _Lenient(service1, service2, client1=service1, client2=(service1, service2)) + + Note that, the values of these options are thread-specific. + + """ + # The executing lenient client at runtime. + self.__dict__["active"] = None + # The global lenient services state activation switch. 
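+        # For instance (names illustrative), after
+        #     lenient = _Lenient("pkg.service", client="pkg.service")
+        # the instance state holds:
+        #     lenient.__dict__ == {"active": None, "enable": True,
+        #                          "pkg.service": True, "client": ("pkg.service",)}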
+ self.__dict__["enable"] = _LENIENT_ENABLE_DEFAULT + + for service in args: + self.register_service(service) + + for client, services in kwargs.items(): + self.register_client(client, services) + + def __call__(self, func): + """Determine whether it is valid for the function/method to provide a lenient service. + + Determine whether it is valid for the function/method to provide a + lenient service at runtime to the actively executing lenient client. + + Parameters + ---------- + func : callable or str + A function/method or fully qualified string name of the function/method. + + Returns + ------- + bool + + """ + result = False + if self.__dict__["enable"]: + service = _qualname(func) + if service in self and self.__dict__[service]: + active = self.__dict__["active"] + if active is not None and active in self: + services = self.__dict__[active] + if isinstance(services, str) or not isinstance(services, Iterable): + services = (services,) + result = service in services + return result + + def __contains__(self, name): + name = _qualname(name) + return name in self.__dict__ + + def __getattr__(self, name): + if name not in self.__dict__: + cls = self.__class__.__name__ + emsg = f"Invalid {cls!r} option, got {name!r}." + raise AttributeError(emsg) + return self.__dict__[name] + + def __getitem__(self, name): + name = _qualname(name) + if name not in self.__dict__: + cls = self.__class__.__name__ + emsg = f"Invalid {cls!r} option, got {name!r}." + raise KeyError(emsg) + return self.__dict__[name] + + def __repr__(self): + cls = self.__class__.__name__ + width = len(cls) + 1 + kwargs = [ + "{}={!r}".format(name, self.__dict__[name]) + for name in sorted(self.__dict__.keys()) + ] + joiner = ",\n{}".format(" " * width) + return "{}({})".format(cls, joiner.join(kwargs)) + + def __setitem__(self, name, value): + name = _qualname(name) + cls = self.__class__.__name__ + + if name not in self.__dict__: + emsg = f"Invalid {cls!r} option, got {name!r}." + raise KeyError(emsg) + + if name == "active": + value = _qualname(value) + if not isinstance(value, str) and value is not None: + emsg = f"Invalid {cls!r} option {name!r}, expected a registered {cls!r} client, got {value!r}." + raise ValueError(emsg) + self.__dict__[name] = value + elif name == "enable": + self.enable = value + else: + if isinstance(value, str) or callable(value): + value = (value,) + if isinstance(value, Iterable): + value = tuple([_qualname(item) for item in value]) + self.__dict__[name] = value + + @contextmanager + def context(self, *args, **kwargs): + """Context manager supporting temporary modification of lenient state. + + Return a context manager which allows temporary modification of + the lenient option state for the active thread. + + On entry to the context manager, all provided keyword arguments are + applied. On exit from the context manager, the previous lenient option + state is restored. + + For example:: + + with iris._LENIENT.context(example_lenient_flag=False): + # ... code that expects some non-lenient behaviour + + .. note:: + iris._LENIENT.example_lenient_flag does not exist and is + provided only as an example. + + """ + + def update_client(client, services): + if client in self.__dict__: + existing_services = self.__dict__[client] + else: + existing_services = () + + self.__dict__[client] = tuple(set(existing_services + services)) + + # Save the original state. + original_state = deepcopy(self.__dict__) + + # Temporarily update the state with the kwargs first. 
+            for name, value in kwargs.items():
+                self[name] = value
+
+            # Get the active client.
+            active = self.__dict__["active"]
+
+            if args:
+                # Update the client with the provided services.
+                new_services = tuple([_qualname(arg) for arg in args])
+
+                if active is None:
+                    # Ensure not to use "context" as the ephemeral name
+                    # of the context manager runtime "active" lenient client,
+                    # as this causes a namespace clash with this method
+                    # i.e., _Lenient.context, via _Lenient.__getattr__
+                    active = "__context"
+                    self.__dict__["active"] = active
+                    self.__dict__[active] = new_services
+                else:
+                    # Append provided services to any pre-existing services of the active client.
+                    update_client(active, new_services)
+            else:
+                # Append previous ephemeral services (for non-specific client) to the active client.
+                if (
+                    active is not None
+                    and active != "__context"
+                    and "__context" in self.__dict__
+                ):
+                    new_services = self.__dict__["__context"]
+                    update_client(active, new_services)
+
+            try:
+                yield
+            finally:
+                # Restore the original state.
+                self.__dict__.clear()
+                self.__dict__.update(original_state)
+
+    @property
+    def enable(self):
+        """Return the activation state of the lenient services."""
+        return self.__dict__["enable"]
+
+    @enable.setter
+    def enable(self, state):
+        """Set the activation state of the lenient services.
+
+        Setting the state to `False` disables all lenient services, and
+        setting the state to `True` enables all lenient services.
+
+        Parameters
+        ----------
+        state : bool
+            Activation state for the lenient services.
+
+        """
+        if not isinstance(state, bool):
+            cls = self.__class__.__name__
+            emsg = f"Invalid {cls!r} option 'enable', expected a {type(True)!r}, got {state!r}."
+            raise ValueError(emsg)
+        self.__dict__["enable"] = state
+
+    def register_client(self, func, services, append=False):
+        """Add the lenient client to the service mapping.
+
+        Add the provided mapping of lenient client function/method to
+        required lenient service function/methods.
+
+        Parameters
+        ----------
+        func : callable or str
+            A client function/method or fully qualified string name of the
+            client function/method.
+        services : callable or str or iterable of callable/str
+            One or more service function/methods or fully qualified string names
+            of the required service function/methods.
+        append : bool, default=False
+            If True, append the lenient services to any pre-registered lenient
+            services for the provided lenient client. Default is False.
+
+        """
+        func = _qualname(func)
+        cls = self.__class__.__name__
+
+        if func in _LENIENT_PROTECTED:
+            emsg = (
+                f"Cannot register {cls!r} client. "
+                f"Please rename your client to be something other than {func!r}."
+            )
+            raise ValueError(emsg)
+        if isinstance(services, str) or not isinstance(services, Iterable):
+            services = (services,)
+        if not len(services):
+            emsg = f"Require at least one {cls!r} client service."
+            raise ValueError(emsg)
+        services = tuple([_qualname(service) for service in services])
+        if append:
+            # The original provided service order is not significant. There is
+            # no requirement to preserve it, so it's safe to sort.
+            existing = self.__dict__[func] if func in self else ()
+            services = tuple(sorted(set(existing) | set(services)))
+        self.__dict__[func] = services
+
+    def register_service(self, func):
+        """Add the provided function/method as providing a lenient service and activate it.
+
+        Parameters
+        ----------
+        func : callable or str
+            A service function/method or fully qualified string name of the
+            service function/method.
+
+        """
+        func = _qualname(func)
+        if func in _LENIENT_PROTECTED:
+            cls = self.__class__.__name__
+            emsg = (
+                f"Cannot register {cls!r} service. "
+                f"Please rename your service to be something other than {func!r}."
+            )
+            raise ValueError(emsg)
+        self.__dict__[func] = True
+
+    def unregister_client(self, func):
+        """Remove the provided function/method as a lenient client using lenient services.
+
+        Parameters
+        ----------
+        func : callable or str
+            A function/method or fully qualified string name of the function/method.
+
+        """
+        func = _qualname(func)
+        cls = self.__class__.__name__
+
+        if func in _LENIENT_PROTECTED:
+            emsg = f"Cannot unregister {cls!r} client, as {func!r} is a protected {cls!r} option."
+            raise ValueError(emsg)
+
+        if func in self.__dict__:
+            value = self.__dict__[func]
+            if isinstance(value, bool):
+                emsg = f"Cannot unregister {cls!r} client, as {func!r} is not a valid {cls!r} client."
+                raise ValueError(emsg)
+            del self.__dict__[func]
+        else:
+            emsg = f"Cannot unregister unknown {cls!r} client {func!r}."
+            raise ValueError(emsg)
+
+    def unregister_service(self, func):
+        """Remove the provided function/method as providing a lenient service.
+
+        Parameters
+        ----------
+        func : callable or str
+            A function/method or fully qualified string name of the function/method.
+
+        """
+        func = _qualname(func)
+        cls = self.__class__.__name__
+
+        if func in _LENIENT_PROTECTED:
+            emsg = f"Cannot unregister {cls!r} service, as {func!r} is a protected {cls!r} option."
+            raise ValueError(emsg)
+
+        if func in self.__dict__:
+            value = self.__dict__[func]
+            if not isinstance(value, bool):
+                emsg = f"Cannot unregister {cls!r} service, as {func!r} is not a valid {cls!r} service."
+                raise ValueError(emsg)
+            del self.__dict__[func]
+        else:
+            emsg = f"Cannot unregister unknown {cls!r} service {func!r}."
+            raise ValueError(emsg)
+
+
+#: (Private) Instance that manages all Iris run-time lenient client and service options.
+_LENIENT = _Lenient()
+
+#: (Public) Instance that manages all Iris run-time lenient features.
+LENIENT = Lenient()
diff --git a/lib/iris/common/metadata.py b/lib/iris/common/metadata.py
new file mode 100644
index 0000000000..bfbc75507e
--- /dev/null
+++ b/lib/iris/common/metadata.py
@@ -0,0 +1,2021 @@
+# Copyright Iris contributors
+#
+# This file is part of Iris and is released under the BSD license.
+# See LICENSE in the root of the repository for full licensing details.
+"""Provides the infrastructure to support the common metadata API.""" + +from __future__ import annotations + +from abc import ABCMeta +from collections import namedtuple +from collections.abc import Iterable, Mapping +from copy import deepcopy +from functools import lru_cache, wraps +import re +from typing import TYPE_CHECKING, Any + +import cf_units +import numpy as np +import numpy.ma as ma +from xxhash import xxh64_hexdigest + +if TYPE_CHECKING: + from iris.coords import CellMethod +from ..config import get_logger +from ._split_attribute_dicts import adjust_for_split_attribute_dictionaries +from .lenient import _LENIENT +from .lenient import _lenient_service as lenient_service +from .lenient import _qualname as qualname + +__all__ = [ + "AncillaryVariableMetadata", + "BaseMetadata", + "CellMeasureMetadata", + "CoordMetadata", + "CubeMetadata", + "DimCoordMetadata", + "MeshCoordMetadata", + "MeshMetadata", + "SERVICES", + "SERVICES_COMBINE", + "SERVICES_DIFFERENCE", + "SERVICES_EQUAL", + "hexdigest", + "metadata_filter", + "metadata_manager_factory", +] + + +# https://www.unidata.ucar.edu/software/netcdf/docs/netcdf_data_set_components.html#object_name + +_TOKEN_PARSE = re.compile(r"""^[a-zA-Z0-9][\w\.\+\-@]*$""") + +# Configure the logger. +logger = get_logger(__name__, fmt="[%(cls)s.%(funcName)s]") + + +def hexdigest(item): + """Calculate a hexadecimal string hash representation of the provided item. + + Calculates a 64-bit non-cryptographic hash of the provided item, using + the extremely fast ``xxhash`` hashing algorithm, and returns the hexdigest + string representation of the hash. + + This provides a means to compare large and/or complex objects through + simple string hexdigest comparison. + + Parameters + ---------- + item : object + The item that requires to have its hexdigest calculated. + + Returns + ------- + str + The string hexadecimal representation of the item's 64-bit hash. + + """ + # Special case: deal with numpy arrays. + if ma.isMaskedArray(item): + parts = ( + item.shape, + xxh64_hexdigest(item.data), + xxh64_hexdigest(item.mask), + ) + item = str(parts) + elif isinstance(item, np.ndarray): + parts = (item.shape, xxh64_hexdigest(item)) + item = str(parts) + + try: + # Calculate single-shot hash to avoid allocating state on the heap + result = xxh64_hexdigest(item) + except TypeError: + # xxhash expects a bytes-like object, so try hashing the + # string representation of the provided item instead, but + # also fold in the object type... + parts = (type(item), item) + result = xxh64_hexdigest(str(parts)) + + return result + + +class _NamedTupleMeta(ABCMeta): + """Meta-class convenience for creating a namedtuple. + + Meta-class to support the convenience of creating a namedtuple from + names/members of the metadata class hierarchy. 
+ + """ + + def __new__(mcs, name, bases, namespace): + names = [] + + for base in bases: + if hasattr(base, "_fields"): + base_names = getattr(base, "_fields") + is_abstract = getattr(base_names, "__isabstractmethod__", False) + if not is_abstract: + if (not isinstance(base_names, Iterable)) or isinstance( + base_names, str + ): + base_names = (base_names,) + names.extend(base_names) + + if "_members" in namespace and not getattr( + namespace["_members"], "__isabstractmethod__", False + ): + namespace_names = namespace["_members"] + + if (not isinstance(namespace_names, Iterable)) or isinstance( + namespace_names, str + ): + namespace_names = (namespace_names,) + + names.extend(namespace_names) + + if names: + item = namedtuple(f"{name}Namedtuple", names) + bases = list(bases) + # Influence the appropriate MRO. + bases.insert(0, item) + bases = tuple(bases) + + return super().__new__(mcs, name, bases, namespace) + + +class BaseMetadata(metaclass=_NamedTupleMeta): + """Container for common metadata.""" + + DEFAULT_NAME = "unknown" # the fall-back name for metadata identity + + _members: str | Iterable[str] = ( + "standard_name", + "long_name", + "var_name", + "units", + "attributes", + ) + + __slots__ = () + + standard_name: str | None + long_name: str | None + var_name: str | None + units: cf_units.Unit + attributes: Any + + @lenient_service + def __eq__(self, other): + """Determine whether the associated metadata members are equivalent. + + Parameters + ---------- + other : metadata + A metadata instance of the same type. + + Returns + ------- + bool + + """ + result = NotImplemented + # Only perform equivalence with similar class instances. + if hasattr(other, "__class__") and other.__class__ is self.__class__: + if _LENIENT(self.__eq__) or _LENIENT(self.equal): + # Perform "lenient" equality. + logger.debug("lenient", extra=dict(cls=self.__class__.__name__)) + result = self._compare_lenient(other) + else: + # Perform "strict" equality. + logger.debug("strict", extra=dict(cls=self.__class__.__name__)) + + def func(field): + left = getattr(self, field) + right = getattr(other, field) + if self._is_attributes(field, left, right): + result = self._compare_strict_attributes(left, right) + else: + result = left == right + return result + + # Note that, for strict we use "_fields" not "_members". + # TODO: refactor so that 'non-participants' can be held in their specific subclasses. + # Certain members never participate in strict equivalence, so + # are filtered out. + fields = filter( + lambda field: field + not in ( + "circular", + "location_axis", + "node_dimension", + "edge_dimension", + "face_dimension", + ), + self._fields, + ) + result = all([func(field) for field in fields]) + + return result + + def __lt__(self, other): + # + # Support Python2 behaviour for a "<" operation involving a + # "NoneType" operand. 
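+        # (The sort key below maps each field to "(value is not None, value)",
+        # so fields of None order before populated values, instead of raising
+        # a TypeError when comparing None against another type.)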
+ # + if not isinstance(other, self.__class__): + return NotImplemented + + def _sort_key(item): + keys = [] + for field in item._fields: + if field != "attributes": + value = getattr(item, field) + keys.extend((value is not None, value)) + return tuple(keys) + + return _sort_key(self) < _sort_key(other) + + def __ne__(self, other): + result = self.__eq__(other) + if result is not NotImplemented: + result = not result + + return result + + def __str__(self): + field_strings = [] + for field in self._fields: + value = getattr(self, field) + if value is None or isinstance(value, (str, Mapping)) and not value: + continue + field_strings.append(f"{field}={value}") + + return f"{type(self).__name__}({', '.join(field_strings)})" + + def _api_common(self, other, func_service, func_operation, action, lenient=None): + """Perform common entry-point for lenient metadata API methods. + + Parameters + ---------- + other : metadata + A metadata instance of the same type. + func_service : callable + The parent service method offering the API entry-point to the service. + func_operation : callable + The parent service method that provides the actual service. + action : str + The verb describing the service operation. + lenient : bool, optional + Enable/disable the lenient service operation. The default is to automatically + detect whether this lenient service operation is enabled. + + Returns + ------- + The result of the service operation to the parent service caller. + + """ + # Ensure that we have similar class instances. + if not hasattr(other, "__class__") or other.__class__ is not self.__class__: + emsg = "Cannot {} {!r} with {!r}." + raise TypeError(emsg.format(action, self.__class__.__name__, type(other))) + + if lenient is None: + result = func_operation(other) + else: + if lenient: + # Use qualname to disassociate from the instance bounded method. + args, kwargs = (qualname(func_service),), dict() + else: + # Use qualname to guarantee that the instance bounded method + # is a hashable key. + args, kwargs = (), {qualname(func_service): False} + + with _LENIENT.context(*args, **kwargs): + result = func_operation(other) + + return result + + def _combine(self, other): + """Perform associated metadata member combination.""" + if _LENIENT(self.combine): + # Perform "lenient" combine. + logger.debug("lenient", extra=dict(cls=self.__class__.__name__)) + values = self._combine_lenient(other) + else: + # Perform "strict" combine. + logger.debug("strict", extra=dict(cls=self.__class__.__name__)) + + def func(field): + left = getattr(self, field) + right = getattr(other, field) + if self._is_attributes(field, left, right): + result = self._combine_strict_attributes(left, right) + else: + result = left if left == right else None + return result + + # Note that, for strict we use "_fields" not "_members". + values = [func(field) for field in self._fields] + + return values + + def _combine_lenient(self, other): + """Perform lenient combination of metadata members. + + Parameters + ---------- + other : BaseMetadata + The other metadata participating in the lenient combination. + + Returns + ------- + A list of combined metadata member values. + + """ + + def func(field): + left = getattr(self, field) + right = getattr(other, field) + result = None + if field == "units": + # Perform "strict" combination for "units". 
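+                # (e.g., combining units of "m" with "km" yields None;
+                # differing units are never silently merged.)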
+ result = left if left == right else None + elif self._is_attributes(field, left, right): + result = self._combine_lenient_attributes(left, right) + else: + if left == right: + result = left + elif left is None: + result = right + elif right is None: + result = left + return result + + # Note that, we use "_members" not "_fields". + return [func(field) for field in BaseMetadata._members] + + @staticmethod + def _combine_lenient_attributes(left, right): + """Leniently combine the dictionary members together.""" + # Copy the dictionaries. + left = deepcopy(left) + right = deepcopy(right) + # Use xxhash to perform an extremely fast non-cryptographic hash of + # each dictionary key rvalue, thus ensuring that the dictionary is + # completely hashable, as required by a set. + sleft = {(k, hexdigest(v)) for k, v in left.items()} + sright = {(k, hexdigest(v)) for k, v in right.items()} + # Intersection of common items. + common = sleft & sright + # Items in sleft different from sright. + dsleft = dict(sleft - sright) + # Items in sright different from sleft. + dsright = dict(sright - sleft) + # Intersection of common item keys with different values. + keys = set(dsleft.keys()) & set(dsright.keys()) + # Remove (in-place) common item keys with different values. + [dsleft.pop(key) for key in keys] + [dsright.pop(key) for key in keys] + # Now bring the result together. + result = {k: left[k] for k, _ in common} + result.update({k: left[k] for k in dsleft.keys()}) + result.update({k: right[k] for k in dsright.keys()}) + + return result + + @staticmethod + def _combine_strict_attributes(left, right): + """Perform strict combination of the dictionary members.""" + # Copy the dictionaries. + left = deepcopy(left) + right = deepcopy(right) + # Use xxhash to perform an extremely fast non-cryptographic hash of + # each dictionary key rvalue, thus ensuring that the dictionary is + # completely hashable, as required by a set. + sleft = {(k, hexdigest(v)) for k, v in left.items()} + sright = {(k, hexdigest(v)) for k, v in right.items()} + # Intersection of common items. + common = sleft & sright + # Now bring the result together. + result = {k: left[k] for k, _ in common} + + return result + + def _compare_lenient(self, other): + """Perform lenient equality of metadata members. + + Parameters + ---------- + other : BaseMetadata + The other metadata participating in the lenient comparison. + + Returns + ------- + bool + + """ + result = False + + # Use the "name" method to leniently compare "standard_name", + # "long_name", and "var_name" in a well defined way. + if self.name() == other.name(): + + def func(field): + left = getattr(self, field) + right = getattr(other, field) + if field == "units": + # Perform "strict" compare for "units". + result = left == right + elif self._is_attributes(field, left, right): + result = self._compare_lenient_attributes(left, right) + else: + # Perform "lenient" compare for members. + result = (left == right) or left is None or right is None + return result + + # Note that, we use "_members" not "_fields". + # Lenient equality explicitly ignores the "var_name" member. 
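+            # (So, for example, two metadata that differ only in "var_name",
+            # or where one side's "long_name" is None, still compare
+            # leniently equal.)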
+ result = all( + [func(field) for field in BaseMetadata._members if field != "var_name"] + ) + + return result + + @staticmethod + def _compare_lenient_attributes(left, right): + """Perform lenient compare between the dictionary members.""" + # Use xxhash to perform an extremely fast non-cryptographic hash of + # each dictionary key rvalue, thus ensuring that the dictionary is + # completely hashable, as required by a set. + sleft = {(k, hexdigest(v)) for k, v in left.items()} + sright = {(k, hexdigest(v)) for k, v in right.items()} + # Items in sleft different from sright. + dsleft = dict(sleft - sright) + # Items in sright different from sleft. + dsright = dict(sright - sleft) + # Intersection of common item keys with different values. + keys = set(dsleft.keys()) & set(dsright.keys()) + + return not bool(keys) + + @staticmethod + def _compare_strict_attributes(left, right): + """Perform strict compare between the dictionary members.""" + # Use xxhash to perform an extremely fast non-cryptographic hash of + # each dictionary key rvalue, thus ensuring that the dictionary is + # completely hashable, as required by a set. + sleft = {(k, hexdigest(v)) for k, v in left.items()} + sright = {(k, hexdigest(v)) for k, v in right.items()} + + return sleft == sright + + def _difference(self, other): + """Perform associated metadata member difference.""" + if _LENIENT(self.difference): + # Perform "lenient" difference. + logger.debug("lenient", extra=dict(cls=self.__class__.__name__)) + values = self._difference_lenient(other) + else: + # Perform "strict" difference. + logger.debug("strict", extra=dict(cls=self.__class__.__name__)) + + def func(field): + left = getattr(self, field) + right = getattr(other, field) + if self._is_attributes(field, left, right): + result = self._difference_strict_attributes(left, right) + else: + result = None if left == right else (left, right) + return result + + # Note that, for strict we use "_fields" not "_members". + values = [func(field) for field in self._fields] + + return values + + def _difference_lenient(self, other): + """Perform lenient difference of metadata members. + + Parameters + ---------- + other : BaseMetadata + The other metadata participating in the lenient difference. + + Returns + ------- + A list of difference metadata member values. + + """ + + def func(field): + left = getattr(self, field) + right = getattr(other, field) + if field == "units": + # Perform "strict" difference for "units". + result = None if left == right else (left, right) + elif self._is_attributes(field, left, right): + result = self._difference_lenient_attributes(left, right) + else: + # Perform "lenient" difference for members. + result = ( + (left, right) + if left is not None and right is not None and left != right + else None + ) + return result + + # Note that, we use "_members" not "_fields". + return [func(field) for field in BaseMetadata._members] + + @staticmethod + def _difference_lenient_attributes(left, right): + """Perform lenient difference between the dictionary members.""" + # Use xxhash to perform an extremely fast non-cryptographic hash of + # each dictionary key rvalue, thus ensuring that the dictionary is + # completely hashable, as required by a set. + sleft = {(k, hexdigest(v)) for k, v in left.items()} + sright = {(k, hexdigest(v)) for k, v in right.items()} + # Items in sleft different from sright. + dsleft = dict(sleft - sright) + # Items in sright different from sleft. 
+ dsright = dict(sright - sleft) + # Intersection of common item keys with different values. + keys = set(dsleft.keys()) & set(dsright.keys()) + # Keep (in-place) common item keys with different values. + [dsleft.pop(key) for key in list(dsleft.keys()) if key not in keys] + [dsright.pop(key) for key in list(dsright.keys()) if key not in keys] + + if not bool(dsleft) and not bool(dsright): + result = None + else: + # Replace hash-rvalue with original rvalue. + dsleft = {k: left[k] for k in dsleft.keys()} + dsright = {k: right[k] for k in dsright.keys()} + result = (dsleft, dsright) + + return result + + @staticmethod + def _difference_strict_attributes(left, right): + """Perform strict difference between the dictionary members.""" + # Use xxhash to perform an extremely fast non-cryptographic hash of + # each dictionary key rvalue, thus ensuring that the dictionary is + # completely hashable, as required by a set. + sleft = {(k, hexdigest(v)) for k, v in left.items()} + sright = {(k, hexdigest(v)) for k, v in right.items()} + # Items in sleft different from sright. + dsleft = dict(sleft - sright) + # Items in sright different from sleft. + dsright = dict(sright - sleft) + + if not bool(dsleft) and not bool(dsright): + result = None + else: + # Replace hash-rvalue with original rvalue. + dsleft = {k: left[k] for k in dsleft.keys()} + dsright = {k: right[k] for k in dsright.keys()} + result = (dsleft, dsright) + + return result + + @staticmethod + def _is_attributes(field, left, right): + """Determine whether we have two 'attributes' dictionaries.""" + return ( + field == "attributes" + and isinstance(left, Mapping) + and isinstance(right, Mapping) + ) + + @lenient_service + def combine(self, other, lenient=None): + """Return a new metadata instance created by combining each of the associated metadata members. + + Parameters + ---------- + other : metadata + A metadata instance of the same type. + lenient : bool, optional + Enable/disable lenient combination. The default is to automatically + detect whether this lenient operation is enabled. + + Returns + ------- + Metadata instance. + + """ + result = self._api_common( + other, self.combine, self._combine, "combine", lenient=lenient + ) + return self.__class__(*result) + + @lenient_service + def difference(self, other, lenient=None): + """Perform lenient metadata difference operation. + + Return a new metadata instance created by performing a difference + comparison between each of the associated metadata members. + + A metadata member returned with a value of "None" indicates that there + is no difference between the members being compared. Otherwise, a tuple + of the different values is returned. + + Parameters + ---------- + other : metadata + A metadata instance of the same type. + lenient : bool, optional + Enable/disable lenient difference. The default is to automatically + detect whether this lenient operation is enabled. + + Returns + ------- + Metadata instance of member differences or None. + + """ + result = self._api_common( + other, self.difference, self._difference, "differ", lenient=lenient + ) + result = ( + None if all([item is None for item in result]) else self.__class__(*result) + ) + return result + + @lenient_service + def equal(self, other, lenient=None): + """Determine whether the associated metadata members are equivalent. + + Parameters + ---------- + other : metadata + A metadata instance of the same type. + lenient : bool, optional + Enable/disable lenient equivalence. 
The default is to automatically
+            detect whether this lenient operation is enabled.
+
+        Returns
+        -------
+        bool
+
+        """
+        result = self._api_common(
+            other, self.equal, self.__eq__, "compare", lenient=lenient
+        )
+        return result
+
+    @classmethod
+    def from_metadata(cls, other):
+        """Convert metadata instance to this metadata type.
+
+        Convert the provided metadata instance from a different type
+        to this metadata type, using only the relevant metadata members.
+
+        Non-common metadata members are set to ``None``.
+
+        Parameters
+        ----------
+        other : metadata
+            A metadata instance of any type.
+
+        Returns
+        -------
+        New metadata instance.
+
+        """
+        result = None
+        if isinstance(other, BaseMetadata):
+            if other.__class__ is cls:
+                result = other
+            else:
+                kwargs = {field: None for field in cls._fields}
+                fields = set(cls._fields) & set(other._fields)
+                for field in fields:
+                    kwargs[field] = getattr(other, field)
+                result = cls(**kwargs)
+        return result
+
+    def name(self, default: str | None = None, token: bool = False) -> str:
+        """Return a string name representing the identity of the metadata.
+
+        First it tries the standard name, then the long name, then the
+        NetCDF variable name, before falling back to a default value,
+        which itself defaults to the string 'unknown'.
+
+        Parameters
+        ----------
+        default :
+            The fall-back string representing the default name. Defaults to
+            the string 'unknown'.
+        token :
+            If True, ensures that the name returned satisfies the criteria for
+            the characters required by a valid NetCDF name. If it is not
+            possible to return a valid name, then a ValueError exception is
+            raised. Defaults to False.
+
+        Returns
+        -------
+        str
+
+        """
+
+        def _check(item):
+            return self.token(item) if token else item
+
+        default = self.DEFAULT_NAME if default is None else default
+
+        result = (
+            _check(self.standard_name)
+            or _check(self.long_name)
+            or _check(self.var_name)
+            or _check(default)
+        )
+
+        if token and result is None:
+            emsg = "Cannot retrieve a valid name token from {!r}"
+            raise ValueError(emsg.format(self))
+
+        return result
+
+    @classmethod
+    def token(cls, name):
+        """Verify the validity of the provided NetCDF name.
+
+        Determine whether the provided name is a valid NetCDF name and thus
+        safe to represent a single parsable token.
+
+        Parameters
+        ----------
+        name : str
+            The string name to verify.
+
+        Returns
+        -------
+        The provided name if valid, otherwise None.
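+
+            For example, "air_temperature" is a valid token and is returned
+            unchanged, whereas "air temperature" (containing a space) is
+            invalid, so None is returned.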
+
+        """
+        if name is not None:
+            result = _TOKEN_PARSE.match(name)
+            name = result if result is None else name
+
+        return name
+
+
+class AncillaryVariableMetadata(BaseMetadata):
+    """Metadata container for a :class:`~iris.coords.AncillaryVariable`."""
+
+    __slots__ = ()
+
+    @wraps(BaseMetadata.__eq__, assigned=("__doc__",), updated=())
+    @lenient_service
+    def __eq__(self, other):
+        return super().__eq__(other)
+
+    @wraps(BaseMetadata.combine, assigned=("__doc__",), updated=())
+    @lenient_service
+    def combine(self, other, lenient=None):
+        return super().combine(other, lenient=lenient)
+
+    @wraps(BaseMetadata.difference, assigned=("__doc__",), updated=())
+    @lenient_service
+    def difference(self, other, lenient=None):
+        return super().difference(other, lenient=lenient)
+
+    @wraps(BaseMetadata.equal, assigned=("__doc__",), updated=())
+    @lenient_service
+    def equal(self, other, lenient=None):
+        return super().equal(other, lenient=lenient)
+
+
+class CellMeasureMetadata(BaseMetadata):
+    """Metadata container for a :class:`~iris.coords.CellMeasure`."""
+
+    _members = "measure"
+
+    __slots__ = ()
+
+    @wraps(BaseMetadata.__eq__, assigned=("__doc__",), updated=())
+    @lenient_service
+    def __eq__(self, other):
+        return super().__eq__(other)
+
+    def _combine_lenient(self, other):
+        """Perform lenient combination of metadata members for cell measures.
+
+        Parameters
+        ----------
+        other : CellMeasureMetadata
+            The other cell measure metadata participating in the lenient
+            combination.
+
+        Returns
+        -------
+        A list of combined metadata member values.
+
+        """
+        # Perform "strict" combination for "measure".
+        value = self.measure if self.measure == other.measure else None
+        # Perform lenient combination of the other parent members.
+        result = super()._combine_lenient(other)
+        result.append(value)
+
+        return result
+
+    def _compare_lenient(self, other):
+        """Perform lenient equality of metadata members for cell measures.
+
+        Parameters
+        ----------
+        other : CellMeasureMetadata
+            The other cell measure metadata participating in the lenient
+            comparison.
+
+        Returns
+        -------
+        bool
+
+        """
+        # Perform "strict" comparison for "measure".
+        result = self.measure == other.measure
+        if result:
+            # Perform lenient comparison of the other parent members.
+            result = super()._compare_lenient(other)
+
+        return result
+
+    def _difference_lenient(self, other):
+        """Perform lenient difference of metadata members for cell measures.
+
+        Parameters
+        ----------
+        other : CellMeasureMetadata
+            The other cell measure metadata participating in the lenient
+            difference.
+
+        Returns
+        -------
+        A list of difference metadata member values.
+
+        """
+        # Perform "strict" difference for "measure".
+        value = None if self.measure == other.measure else (self.measure, other.measure)
+        # Perform lenient difference of the other parent members.
+ result = super()._difference_lenient(other) + result.append(value) + + return result + + @wraps(BaseMetadata.combine, assigned=("__doc__",), updated=()) + @lenient_service + def combine(self, other, lenient=None): + return super().combine(other, lenient=lenient) + + @wraps(BaseMetadata.difference, assigned=("__doc__",), updated=()) + @lenient_service + def difference(self, other, lenient=None): + return super().difference(other, lenient=lenient) + + @wraps(BaseMetadata.equal, assigned=("__doc__",), updated=()) + @lenient_service + def equal(self, other, lenient=None): + return super().equal(other, lenient=lenient) + + +class CoordMetadata(BaseMetadata): + """Metadata container for a :class:`~iris.coords.Coord`.""" + + _members: str | Iterable[str] = ("coord_system", "climatological") + + __slots__ = () + + @wraps(BaseMetadata.__eq__, assigned=("__doc__",), updated=()) + @lenient_service + def __eq__(self, other): + # Convert a DimCoordMetadata instance to a CoordMetadata instance. + if ( + self.__class__ is CoordMetadata + and hasattr(other, "__class__") + and other.__class__ is DimCoordMetadata + ): + other = self.from_metadata(other) + return super().__eq__(other) + + def __lt__(self, other): + # + # Support Python2 behaviour for a "<" operation involving a + # "NoneType" operand. + # + if not isinstance(other, BaseMetadata): + return NotImplemented + + if other.__class__ is DimCoordMetadata: + other = self.from_metadata(other) + + if not isinstance(other, self.__class__): + return NotImplemented + + def _sort_key(item): + keys = [] + for field in item._fields: + if field not in ("attributes", "coord_system"): + value = getattr(item, field) + keys.extend((value is not None, value)) + return tuple(keys) + + return _sort_key(self) < _sort_key(other) + + def _combine_lenient(self, other): + """Perform lenient combination of metadata members for coordinates. + + Parameters + ---------- + other : CoordMetadata + The other coordinate metadata participating in the lenient + combination. + + Returns + ------- + A list of combined metadata member values. + + """ + + # Perform "strict" combination for "coord_system" and "climatological". + def func(field): + left = getattr(self, field) + right = getattr(other, field) + return left if left == right else None + + # Note that, we use "_members" not "_fields". + values = [func(field) for field in CoordMetadata._members] + # Perform lenient combination of the other parent members. + result = super()._combine_lenient(other) + result.extend(values) + + return result + + def _compare_lenient(self, other): + """Perform lenient equality of metadata members for coordinates. + + Parameters + ---------- + other : CoordMetadata + The other coordinate metadata participating in the lenient + comparison. + + Returns + ------- + bool + + """ + # Perform "strict" comparison for "coord_system" and "climatological". + result = all( + [ + getattr(self, field) == getattr(other, field) + for field in CoordMetadata._members + ] + ) + if result: + # Perform lenient comparison of the other parent members. + result = super()._compare_lenient(other) + + return result + + def _difference_lenient(self, other): + """Perform lenient difference of metadata members for coordinates. + + Parameters + ---------- + other : CoordMetadata + The other coordinate metadata participating in the lenient + difference. + + Returns + ------- + A list of difference metadata member values. + + """ + + # Perform "strict" difference for "coord_system" and "climatological". 
+ def func(field): + left = getattr(self, field) + right = getattr(other, field) + return None if left == right else (left, right) + + # Note that, we use "_members" not "_fields". + values = [func(field) for field in CoordMetadata._members] + # Perform lenient difference of the other parent members. + result = super()._difference_lenient(other) + result.extend(values) + + return result + + @wraps(BaseMetadata.combine, assigned=("__doc__",), updated=()) + @lenient_service + def combine(self, other, lenient=None): + # Convert a DimCoordMetadata instance to a CoordMetadata instance. + if ( + self.__class__ is CoordMetadata + and hasattr(other, "__class__") + and other.__class__ is DimCoordMetadata + ): + other = self.from_metadata(other) + return super().combine(other, lenient=lenient) + + @wraps(BaseMetadata.difference, assigned=("__doc__",), updated=()) + @lenient_service + def difference(self, other, lenient=None): + # Convert a DimCoordMetadata instance to a CoordMetadata instance. + if ( + self.__class__ is CoordMetadata + and hasattr(other, "__class__") + and other.__class__ is DimCoordMetadata + ): + other = self.from_metadata(other) + return super().difference(other, lenient=lenient) + + @wraps(BaseMetadata.equal, assigned=("__doc__",), updated=()) + @lenient_service + def equal(self, other, lenient=None): + # Convert a DimCoordMetadata instance to a CoordMetadata instance. + if ( + self.__class__ is CoordMetadata + and hasattr(other, "__class__") + and other.__class__ is DimCoordMetadata + ): + other = self.from_metadata(other) + return super().equal(other, lenient=lenient) + + +class CubeMetadata(BaseMetadata): + """Metadata container for a :class:`~iris.cube.Cube`.""" + + _members = "cell_methods" + + cell_methods: tuple[CellMethod, ...] + + __slots__ = () + + @wraps(BaseMetadata.__eq__, assigned=("__doc__",), updated=()) + @lenient_service + def __eq__(self, other): + return super().__eq__(other) + + def __lt__(self, other): + # + # Support Python2 behaviour for a "<" operation involving a + # "NoneType" operand. + # + if not isinstance(other, self.__class__): + return NotImplemented + + def _sort_key(item): + keys = [] + for field in item._fields: + if field not in ("attributes", "cell_methods"): + value = getattr(item, field) + keys.extend((value is not None, value)) + return tuple(keys) + + return _sort_key(self) < _sort_key(other) + + def _combine_lenient(self, other): + """Perform lenient combination of metadata members for cubes. + + Parameters + ---------- + other : CubeMetadata + The other cube metadata participating in the lenient combination. + + Returns + ------- + A list of combined metadata member values. + + """ + # Perform "strict" combination for "cell_methods". + value = self.cell_methods if self.cell_methods == other.cell_methods else None + # Perform lenient combination of the other parent members. + result = super()._combine_lenient(other) + result.append(value) + + return result + + def _compare_lenient(self, other): + """Perform lenient equality of metadata members for cubes. + + Parameters + ---------- + other : CubeMetadata + The other cube metadata participating in the lenient comparison. + + Returns + ------- + bool + + """ + # Perform "strict" comparison for "cell_methods". + result = self.cell_methods == other.cell_methods + if result: + result = super()._compare_lenient(other) + + return result + + def _difference_lenient(self, other): + """Perform lenient difference of metadata members for cubes. 
+ + Parameters + ---------- + other : CubeMetadata + The other cube metadata participating in the lenient difference. + + Returns + ------- + A list of difference metadata member values. + + """ + # Perform "strict" difference for "cell_methods". + value = ( + None + if self.cell_methods == other.cell_methods + else (self.cell_methods, other.cell_methods) + ) + # Perform lenient difference of the other parent members. + result = super()._difference_lenient(other) + result.append(value) + + return result + + @property + def _names(self): + """A tuple containing the value of each name participating in the identity of a cube. + + A tuple containing the value of each name participating in the identity + of a :class:`iris.cube.Cube`. This includes the standard name, + long name, NetCDF variable name, and the STASH from the attributes + dictionary. + + """ + standard_name = self.standard_name + long_name = self.long_name + var_name = self.var_name + + # Defensive enforcement of attributes being a dictionary. + if not isinstance(self.attributes, Mapping): + try: + self.attributes = dict() + except AttributeError: + emsg = "Invalid '{}.attributes' member, must be a mapping." + raise AttributeError(emsg.format(self.__class__.__name__)) + + stash_name = self.attributes.get("STASH") + if stash_name is not None: + stash_name = str(stash_name) + + return standard_name, long_name, var_name, stash_name + + @wraps(BaseMetadata.combine, assigned=("__doc__",), updated=()) + @lenient_service + def combine(self, other, lenient=None): + return super().combine(other, lenient=lenient) + + @wraps(BaseMetadata.difference, assigned=("__doc__",), updated=()) + @lenient_service + def difference(self, other, lenient=None): + return super().difference(other, lenient=lenient) + + @wraps(BaseMetadata.equal, assigned=("__doc__",), updated=()) + @lenient_service + def equal(self, other, lenient=None): + return super().equal(other, lenient=lenient) + + @wraps(BaseMetadata.name) + def name(self, default=None, token=False): + def _check(item): + return self.token(item) if token else item + + default = self.DEFAULT_NAME if default is None else default + + # Defensive enforcement of attributes being a dictionary. + if not isinstance(self.attributes, Mapping): + try: + self.attributes = dict() + except AttributeError: + emsg = "Invalid '{}.attributes' member, must be a mapping." + raise AttributeError(emsg.format(self.__class__.__name__)) + + result = ( + _check(self.standard_name) + or _check(self.long_name) + or _check(self.var_name) + or _check(str(self.attributes.get("STASH", ""))) + or _check(default) + ) + + if token and result is None: + emsg = "Cannot retrieve a valid name token from {!r}" + raise ValueError(emsg.format(self)) + + return result + + # + # Override each of the attribute-dict operations in BaseMetadata, to enable + # them to deal with split-attribute dictionaries correctly. + # There are 6 of these, for (equals/combine/difference) * (lenient/strict). + # Each is overridden with a *wrapped* version of the parent method, using the + # "@adjust_for_split_attribute_dictionaries" decorator, which converts any + # split-attribute dictionaries in the inputs to ordinary dicts, and likewise + # re-converts any dictionaries in the return value. 
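+    # (For instance, a cube's split attribute dictionary distinguishes
+    # "global" from "local" attributes; flattening first means the wrapped
+    # BaseMetadata logic need not know about the split form.)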
+ # + + @staticmethod + @adjust_for_split_attribute_dictionaries + def _combine_lenient_attributes(left, right): + return BaseMetadata._combine_lenient_attributes(left, right) + + @staticmethod + @adjust_for_split_attribute_dictionaries + def _combine_strict_attributes(left, right): + return BaseMetadata._combine_strict_attributes(left, right) + + @staticmethod + @adjust_for_split_attribute_dictionaries + def _compare_lenient_attributes(left, right): + return BaseMetadata._compare_lenient_attributes(left, right) + + @staticmethod + @adjust_for_split_attribute_dictionaries + def _compare_strict_attributes(left, right): + return BaseMetadata._compare_strict_attributes(left, right) + + @staticmethod + @adjust_for_split_attribute_dictionaries + def _difference_lenient_attributes(left, right): + return BaseMetadata._difference_lenient_attributes(left, right) + + @staticmethod + @adjust_for_split_attribute_dictionaries + def _difference_strict_attributes(left, right): + return BaseMetadata._difference_strict_attributes(left, right) + + +class DimCoordMetadata(CoordMetadata): + """Metadata container for a :class:`~iris.coords.DimCoord`.""" + + # The "circular" member is stateful only, and does not participate + # in lenient/strict equivalence. + _members = ("circular",) + + __slots__ = () + + @wraps(CoordMetadata.__eq__, assigned=("__doc__",), updated=()) + @lenient_service + def __eq__(self, other): + # Convert a CoordMetadata instance to a DimCoordMetadata instance. + if hasattr(other, "__class__") and other.__class__ is CoordMetadata: + other = self.from_metadata(other) + return super().__eq__(other) + + def __lt__(self, other): + # + # Support Python2 behaviour for a "<" operation involving a + # "NoneType" operand. + # + if not isinstance(other, BaseMetadata): + return NotImplemented + + if other.__class__ is CoordMetadata: + other = self.from_metadata(other) + + if not isinstance(other, self.__class__): + return NotImplemented + + def _sort_key(item): + keys = [] + for field in item._fields: + if field not in ("attributes", "coord_system"): + value = getattr(item, field) + keys.extend((value is not None, value)) + return tuple(keys) + + return _sort_key(self) < _sort_key(other) + + @wraps(CoordMetadata._combine_lenient, assigned=("__doc__",), updated=()) + def _combine_lenient(self, other): + # Perform "strict" combination for "circular". + value = self.circular if self.circular == other.circular else None + # Perform lenient combination of the other parent members. + result = super()._combine_lenient(other) + result.append(value) + + return result + + @wraps(CoordMetadata._compare_lenient, assigned=("__doc__",), updated=()) + def _compare_lenient(self, other): + # The "circular" member is not part of lenient equivalence. + return super()._compare_lenient(other) + + @wraps(CoordMetadata._difference_lenient, assigned=("__doc__",), updated=()) + def _difference_lenient(self, other): + # Perform "strict" difference for "circular". + value = ( + None if self.circular == other.circular else (self.circular, other.circular) + ) + # Perform lenient difference of the other parent members. + result = super()._difference_lenient(other) + result.append(value) + + return result + + @wraps(CoordMetadata.combine, assigned=("__doc__",), updated=()) + @lenient_service + def combine(self, other, lenient=None): + # Convert a CoordMetadata instance to a DimCoordMetadata instance. 
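+        # ("from_metadata" fills the non-common "circular" member with None,
+        # so both operands then share an identical set of fields.)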
+ if hasattr(other, "__class__") and other.__class__ is CoordMetadata: + other = self.from_metadata(other) + return super().combine(other, lenient=lenient) + + @wraps(CoordMetadata.difference, assigned=("__doc__",), updated=()) + @lenient_service + def difference(self, other, lenient=None): + # Convert a CoordMetadata instance to a DimCoordMetadata instance. + if hasattr(other, "__class__") and other.__class__ is CoordMetadata: + other = self.from_metadata(other) + return super().difference(other, lenient=lenient) + + @wraps(CoordMetadata.equal, assigned=("__doc__",), updated=()) + @lenient_service + def equal(self, other, lenient=None): + # Convert a CoordMetadata instance to a DimCoordMetadata instance. + if hasattr(other, "__class__") and other.__class__ is CoordMetadata: + other = self.from_metadata(other) + return super().equal(other, lenient=lenient) + + +class ConnectivityMetadata(BaseMetadata): + """Metadata container for a :class:`~iris.mesh.Connectivity`.""" + + # The "location_axis" member is stateful only, and does not participate in + # lenient/strict equivalence. + _members = ("cf_role", "start_index", "location_axis") + + __slots__ = () + + @wraps(BaseMetadata.__eq__, assigned=("__doc__",), updated=()) + @lenient_service + def __eq__(self, other): + return super().__eq__(other) + + def _combine_lenient(self, other): + """Perform lenient combination of metadata members for connectivities. + + Parameters + ---------- + other : ConnectivityMetadata + The other connectivity metadata participating in the lenient + combination. + + Returns + ------- + A list of combined metadata member values. + + """ + + # Perform "strict" combination for "cf_role", "start_index", "location_axis". + def func(field): + left = getattr(self, field) + right = getattr(other, field) + return left if left == right else None + + # Note that, we use "_members" not "_fields". + values = [func(field) for field in ConnectivityMetadata._members] + # Perform lenient combination of the other parent members. + result = super()._combine_lenient(other) + result.extend(values) + + return result + + def _compare_lenient(self, other): + """Perform lenient equality of metadata members for connectivities. + + Parameters + ---------- + other : ConnectivityMetadata + The other connectivity metadata participating in the lenient + comparison. + + Returns + ------- + bool + + """ + # Perform "strict" comparison for "cf_role", "start_index". + # The "location_axis" member is not part of lenient equivalence. + members = filter( + lambda member: member != "location_axis", + ConnectivityMetadata._members, + ) + result = all( + [getattr(self, field) == getattr(other, field) for field in members] + ) + if result: + # Perform lenient comparison of the other parent members. + result = super()._compare_lenient(other) + + return result + + def _difference_lenient(self, other): + """Perform lenient difference of metadata members for connectivities. + + Parameters + ---------- + other : ConnectivityMetadata + The other connectivity metadata participating in the lenient + difference. + + Returns + ------- + A list of difference metadata member values. + + """ + + # Perform "strict" difference for "cf_role", "start_index", "location_axis". + def func(field): + left = getattr(self, field) + right = getattr(other, field) + return None if left == right else (left, right) + + # Note that, we use "_members" not "_fields". 
+        values = [func(field) for field in ConnectivityMetadata._members]
+        # Perform lenient difference of the other parent members.
+        result = super()._difference_lenient(other)
+        result.extend(values)
+
+        return result
+
+    @wraps(BaseMetadata.combine, assigned=("__doc__",), updated=())
+    @lenient_service
+    def combine(self, other, lenient=None):
+        return super().combine(other, lenient=lenient)
+
+    @wraps(BaseMetadata.difference, assigned=("__doc__",), updated=())
+    @lenient_service
+    def difference(self, other, lenient=None):
+        return super().difference(other, lenient=lenient)
+
+    @wraps(BaseMetadata.equal, assigned=("__doc__",), updated=())
+    @lenient_service
+    def equal(self, other, lenient=None):
+        return super().equal(other, lenient=lenient)
+
+
+class MeshMetadata(BaseMetadata):
+    """Metadata container for a :class:`~iris.mesh.MeshXY`."""
+
+    # The "node_dimension", "edge_dimension" and "face_dimension" members are
+    # stateful only; they do not participate in lenient/strict equivalence.
+    _members = (
+        "topology_dimension",
+        "node_dimension",
+        "edge_dimension",
+        "face_dimension",
+    )
+
+    __slots__ = ()
+
+    @wraps(BaseMetadata.__eq__, assigned=("__doc__",), updated=())
+    @lenient_service
+    def __eq__(self, other):
+        return super().__eq__(other)
+
+    def _combine_lenient(self, other):
+        """Perform lenient combination of metadata members for meshes.
+
+        Parameters
+        ----------
+        other : MeshMetadata
+            The other mesh metadata participating in the lenient
+            combination.
+
+        Returns
+        -------
+        A list of combined metadata member values.
+
+        """
+
+        # Perform "strict" combination for "topology_dimension",
+        # "node_dimension", "edge_dimension" and "face_dimension".
+        def func(field):
+            left = getattr(self, field)
+            right = getattr(other, field)
+            return left if left == right else None
+
+        # Note that, we use "_members" not "_fields".
+        values = [func(field) for field in MeshMetadata._members]
+        # Perform lenient combination of the other parent members.
+        result = super()._combine_lenient(other)
+        result.extend(values)
+
+        return result
+
+    def _compare_lenient(self, other):
+        """Perform lenient equality of metadata members for meshes.
+
+        Parameters
+        ----------
+        other : MeshMetadata
+            The other mesh metadata participating in the lenient
+            comparison.
+
+        Returns
+        -------
+        bool
+
+        """
+        # Perform "strict" comparison for "topology_dimension".
+        # "node_dimension", "edge_dimension" and "face_dimension" are not part
+        # of lenient equivalence at all.
+        result = self.topology_dimension == other.topology_dimension
+        if result:
+            # Perform lenient comparison of the other parent members.
+            result = super()._compare_lenient(other)
+
+        return result
+
+    def _difference_lenient(self, other):
+        """Perform lenient difference of metadata members for meshes.
+
+        Parameters
+        ----------
+        other : MeshMetadata
+            The other mesh metadata participating in the lenient
+            difference.
+
+        Returns
+        -------
+        A list of difference metadata member values.
+
+        """
+
+        # Perform "strict" difference for "topology_dimension",
+        # "node_dimension", "edge_dimension" and "face_dimension".
+        def func(field):
+            left = getattr(self, field)
+            right = getattr(other, field)
+            return None if left == right else (left, right)
+
+        # Note that, we use "_members" not "_fields".
+        values = [func(field) for field in MeshMetadata._members]
+        # Perform lenient difference of the other parent members.
+ result = super()._difference_lenient(other) + result.extend(values) + + return result + + @wraps(BaseMetadata.combine, assigned=("__doc__",), updated=()) + @lenient_service + def combine(self, other, lenient=None): + return super().combine(other, lenient=lenient) + + @wraps(BaseMetadata.difference, assigned=("__doc__",), updated=()) + @lenient_service + def difference(self, other, lenient=None): + return super().difference(other, lenient=lenient) + + @wraps(BaseMetadata.equal, assigned=("__doc__",), updated=()) + @lenient_service + def equal(self, other, lenient=None): + return super().equal(other, lenient=lenient) + + +class MeshCoordMetadata(BaseMetadata): + """Metadata container for a :class:`~iris.coords.MeshCoord`.""" + + _members = ("location", "axis") + # NOTE: in future, we may add 'mesh' as part of this metadata, + # as the MeshXY seems part of the 'identity' of a MeshCoord. + # For now we omit it, particularly as we don't yet implement MeshXY.__eq__. + # + # Thus, for now, the MeshCoord class will need to handle 'mesh' explicitly + # in identity / comparison, but in future that may be simplified. + + __slots__ = () + + @wraps(BaseMetadata.__eq__, assigned=("__doc__",), updated=()) + @lenient_service + def __eq__(self, other): + return super().__eq__(other) + + def _combine_lenient(self, other): + """Perform lenient combination of metadata members for MeshCoord. + + Parameters + ---------- + other : MeshCoordMetadata + The other metadata participating in the lenient combination. + + Returns + ------- + A list of combined metadata member values. + + """ + + # It is actually "strict" : return None except where members are equal. + def func(field): + left = getattr(self, field) + right = getattr(other, field) + return left if left == right else None + + # Note that, we use "_members" not "_fields". + values = [func(field) for field in self._members] + # Perform lenient combination of the other parent members. + result = super()._combine_lenient(other) + result.extend(values) + + return result + + def _compare_lenient(self, other): + """Perform lenient equality of metadata members for MeshCoord. + + Parameters + ---------- + other : MeshCoordMetadata + The other metadata participating in the lenient comparison. + + Returns + ------- + bool + + """ + # Perform "strict" comparison for the MeshCoord specific members + # 'location', 'axis' : for equality, they must all match. + result = all( + [getattr(self, field) == getattr(other, field) for field in self._members] + ) + if result: + # Perform lenient comparison of the other parent members. + result = super()._compare_lenient(other) + + return result + + def _difference_lenient(self, other): + """Perform lenient difference of metadata members for MeshCoord. + + Parameters + ---------- + other : MeshCoordMetadata + The other MeshCoord metadata participating in the lenient + difference. + + Returns + ------- + A list of different metadata member values. + + """ + + # Perform "strict" difference for location / axis. + def func(field): + left = getattr(self, field) + right = getattr(other, field) + return None if left == right else (left, right) + + # Note that, we use "_members" not "_fields". + values = [func(field) for field in self._members] + # Perform lenient difference of the other parent members. 
+ result = super()._difference_lenient(other) + result.extend(values) + + return result + + @wraps(BaseMetadata.combine, assigned=("__doc__",), updated=()) + @lenient_service + def combine(self, other, lenient=None): + return super().combine(other, lenient=lenient) + + @wraps(BaseMetadata.difference, assigned=("__doc__",), updated=()) + @lenient_service + def difference(self, other, lenient=None): + return super().difference(other, lenient=lenient) + + @wraps(BaseMetadata.equal, assigned=("__doc__",), updated=()) + @lenient_service + def equal(self, other, lenient=None): + return super().equal(other, lenient=lenient) + + +def metadata_filter( + instances, + item=None, + standard_name=None, + long_name=None, + var_name=None, + attributes=None, + axis=None, +): + """Filter a collection of objects by their metadata to fit the given metadata criteria. + + Criteria can be either specific properties or other objects with metadata + to be matched. + + Parameters + ---------- + instances : + One or more objects to be filtered. + item : optional + Either, + + * a :attr:`~iris.common.mixin.CFVariableMixin.standard_name`, + :attr:`~iris.common.mixin.CFVariableMixin.long_name`, or + :attr:`~iris.common.mixin.CFVariableMixin.var_name` which is compared + against the :meth:`~iris.common.mixin.CFVariableMixin.name`. + * a coordinate or metadata instance equal to that of + the desired objects e.g., :class:`~iris.coords.DimCoord` + or :class:`CoordMetadata`. + standard_name : optional + The CF standard name of the desired object. If ``None``, does not + check for ``standard_name``. + long_name : optional + An unconstrained description of the object. If ``None``, does not + check for ``long_name``. + var_name : optional + The NetCDF variable name of the desired object. If ``None``, does + not check for ``var_name``. + attributes : dict, optional + A dictionary of attributes desired on the object. If ``None``, + does not check for ``attributes``. + axis : optional + The desired object's axis, see :func:`~iris.util.guess_coord_axis`. + If ``None``, does not check for ``axis``. Accepts the values ``X``, + ``Y``, ``Z`` and ``T`` (case-insensitive). + + Returns + ------- + list of the objects + A list of the objects supplied in the ``instances`` argument, limited + to only those that matched the given criteria. + + """ + from ..util import guess_coord_axis + + name = None + obj = None + + if isinstance(item, str): + name = item + else: + obj = item + + # apply de morgan's law for one less logical operation + if not (isinstance(instances, str) or isinstance(instances, Iterable)): + instances = [instances] + + result = instances + + if name is not None: + result = [instance for instance in result if instance.name() == name] + + if standard_name is not None: + result = [ + instance for instance in result if instance.standard_name == standard_name + ] + + if long_name is not None: + result = [instance for instance in result if instance.long_name == long_name] + + if var_name is not None: + result = [instance for instance in result if instance.var_name == var_name] + + if attributes is not None: + if not isinstance(attributes, Mapping): + msg = ( + "The attributes keyword was expecting a dictionary " + "type, but got a %s instead." 
% type(attributes) + ) + raise ValueError(msg) + + def attr_filter(instance): + return all( + k in instance.attributes + and hexdigest(instance.attributes[k]) == hexdigest(v) + for k, v in attributes.items() + ) + + result = [instance for instance in result if attr_filter(instance)] + + if axis is not None: + axis = axis.upper() + + def get_axis(instance): + if hasattr(instance, "axis"): + axis = instance.axis.upper() + else: + axis = guess_coord_axis(instance) + return axis + + result = [instance for instance in result if get_axis(instance) == axis] + + if obj is not None: + if hasattr(obj, "__class__") and issubclass(obj.__class__, BaseMetadata): + target_metadata = obj + else: + target_metadata = obj.metadata + + result = [ + instance for instance in result if instance.metadata == target_metadata + ] + + return result + + +@lru_cache(maxsize=None) +def _factory_cache(cls): + def __init__(self, cls, **kwargs): + #: The metadata class to be manufactured by this factory. + self.cls = cls + + # Proxy for self.cls._fields for later internal use, as this + # saves on indirect property lookup via self.cls + self._fields = cls._fields + + # Initialise the metadata class fields in the instance. + # Use cls directly here since it's available. + for field in cls._fields: + setattr(self, field, None) + + # Populate with provided kwargs, which have already been verified + # by the factory. + for field, value in kwargs.items(): + setattr(self, field, value) + + def __eq__(self, other): + if not hasattr(other, "cls"): + return NotImplemented + match = self.cls is other.cls + if match: + match = self.values == other.values + + return match + + def __getstate__(self): + """Return the instance state to be pickled.""" + return {field: getattr(self, field) for field in self._fields} + + def __ne__(self, other): + match = self.__eq__(other) + if match is not NotImplemented: + match = not match + + return match + + def __reduce__(self): + """Use the __reduce__ interface to allow 'pickle' to recreate this class instance. + + Classes created dynamically at runtime cannot be pickled, as they are + not defined at the top level of a module. As a result, we must use the + __reduce__ interface to allow 'pickle' to recreate this class + instance, and to dump and load the instance state successfully. + + """ + return metadata_manager_factory, (self.cls,), self.__getstate__() + + def __repr__(self): + args = ", ".join( + ["{}={!r}".format(field, getattr(self, field)) for field in self._fields] + ) + return "{}({})".format(self.__class__.__name__, args) + + def __setstate__(self, state): + """Set the instance state when unpickling.""" + for field, value in state.items(): + setattr(self, field, value) + + @property + def fields(self): + """Return the names of the metadata members.""" + # Proxy for built-in namedtuple._fields property. + return self._fields + + @property + def values(self): + fields = {field: getattr(self, field) for field in self._fields} + return self.cls(**fields) + + # Define the name, (inheritance) bases, and namespace of the dynamic class. + name = "MetadataManager" + bases = () + namespace = { + "DEFAULT_NAME": cls.DEFAULT_NAME, + "__init__": __init__, + "__eq__": __eq__, + "__getstate__": __getstate__, + "__ne__": __ne__, + "__reduce__": __reduce__, + "__repr__": __repr__, + "__setstate__": __setstate__, + "fields": fields, + "name": cls.name, + "token": cls.token, + "values": values, + } + + # Account for additional "CubeMetadata" specialised class behaviour.
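+ # CubeMetadata also defines a "_names" member (used when determining a + # cube name), so mirror that on the manufactured manager class too.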
+ if cls is CubeMetadata: + namespace["_names"] = cls._names + + # Dynamically create the metadata manager class. + MetadataManager = type(name, bases, namespace) + + return MetadataManager + + +def metadata_manager_factory(cls, **kwargs): + """Manufacture metadata instances. + + A class instance factory function responsible for manufacturing + metadata instances dynamically at runtime. + + The manager instances returned by the factory are capable of managing + their metadata state, which can be proxied by the owning container. + + Parameters + ---------- + cls : + A subclass of :class:`~iris.common.metadata.BaseMetadata`, defining + the metadata to be managed. + **kwargs : dict, optional + Initial values for the manufactured metadata instance. Unspecified + fields will default to a value of 'None'. + + Returns + ------- + A manager instance for the provided metadata ``cls``. + + """ + # Check whether kwargs have valid fields for the specified metadata. + if kwargs: + extra = [field for field in kwargs.keys() if field not in cls._fields] + if extra: + bad = ", ".join(map(lambda field: "{!r}".format(field), extra)) + emsg = "Invalid {!r} field parameters, got {}." + raise ValueError(emsg.format(cls.__name__, bad)) + + # Dynamically create the metadata manager class at runtime or get a cached + # version of it. + MetadataManager = _factory_cache(cls) + + # Now manufacture an instance of the metadata manager class. + manager = MetadataManager(cls, **kwargs) + + return manager + + +#: Convenience collection of lenient metadata combine services. +SERVICES_COMBINE = ( + AncillaryVariableMetadata.combine, + BaseMetadata.combine, + CellMeasureMetadata.combine, + ConnectivityMetadata.combine, + CoordMetadata.combine, + CubeMetadata.combine, + DimCoordMetadata.combine, + MeshCoordMetadata.combine, + MeshMetadata.combine, +) + + +#: Convenience collection of lenient metadata difference services. +SERVICES_DIFFERENCE = ( + AncillaryVariableMetadata.difference, + BaseMetadata.difference, + CellMeasureMetadata.difference, + ConnectivityMetadata.difference, + CoordMetadata.difference, + CubeMetadata.difference, + DimCoordMetadata.difference, + MeshCoordMetadata.difference, + MeshMetadata.difference, +) + + +#: Convenience collection of lenient metadata equality services. +SERVICES_EQUAL = ( + AncillaryVariableMetadata.__eq__, + AncillaryVariableMetadata.equal, + BaseMetadata.__eq__, + BaseMetadata.equal, + CellMeasureMetadata.__eq__, + CellMeasureMetadata.equal, + ConnectivityMetadata.__eq__, + ConnectivityMetadata.equal, + CoordMetadata.__eq__, + CoordMetadata.equal, + CubeMetadata.__eq__, + CubeMetadata.equal, + DimCoordMetadata.__eq__, + DimCoordMetadata.equal, + MeshCoordMetadata.__eq__, + MeshCoordMetadata.equal, + MeshMetadata.__eq__, + MeshMetadata.equal, +) + +#: Convenience collection of lenient metadata services. +SERVICES = SERVICES_COMBINE + SERVICES_DIFFERENCE + SERVICES_EQUAL diff --git a/lib/iris/common/mixin.py b/lib/iris/common/mixin.py new file mode 100644 index 0000000000..8e89f0ccd0 --- /dev/null +++ b/lib/iris/common/mixin.py @@ -0,0 +1,263 @@ +# Copyright Iris contributors +# +# This file is part of Iris and is released under the BSD license. +# See LICENSE in the root of the repository for full licensing details.
+"""Provides common metadata mixin behaviour.""" + +from __future__ import annotations + +from collections.abc import Mapping +from functools import wraps +from typing import Any + +import cf_units + +import iris.std_names + +from .metadata import BaseMetadata + +__all__ = ["CFVariableMixin", "LimitedAttributeDict"] + + +def _get_valid_standard_name(name): + # Standard names are optionally followed by a standard name + # modifier, separated by one or more blank spaces + + if name is not None: + # Supported standard name modifiers. Ref: [CF] Appendix C. + valid_std_name_modifiers = [ + "detection_minimum", + "number_of_observations", + "standard_error", + "status_flag", + ] + + name_groups = name.split(maxsplit=1) + if name_groups: + std_name = name_groups[0] + name_is_valid = std_name in iris.std_names.STD_NAMES + try: + std_name_modifier = name_groups[1] + except IndexError: + pass # No modifier + else: + name_is_valid &= std_name_modifier in valid_std_name_modifiers + + if not name_is_valid: + raise ValueError("{!r} is not a valid standard_name".format(name)) + + return name + + +class LimitedAttributeDict(dict): + """A specialised 'dict' subclass, which forbids (errors) certain attribute names. + + Used for the attribute dictionaries of all Iris data objects (that is, + :class:`CFVariableMixin` and its subclasses). + + The "excluded" attributes are those which either :mod:`netCDF4` or Iris intpret and + control with special meaning, which therefore should *not* be defined as custom + 'user' attributes on Iris data objects such as cubes. + + For example : "coordinates", "grid_mapping", "scale_factor". + + The 'forbidden' attributes are those listed in + :data:`iris.common.mixin.LimitedAttributeDict.CF_ATTRS_FORBIDDEN` . + + All the forbidden attributes are amongst those listed in + `Appendix A of the CF Conventions: `_ + -- however, not *all* of them, since not all are interpreted by Iris. + + """ + + #: Attributes with special CF meaning, forbidden in Iris attribute dictionaries. + CF_ATTRS_FORBIDDEN = ( + "standard_name", + "long_name", + "units", + "bounds", + "axis", + "calendar", + "leap_month", + "leap_year", + "month_lengths", + "coordinates", + "grid_mapping", + "climatology", + "cell_methods", + "formula_terms", + "compress", + "add_offset", + "scale_factor", + "_FillValue", + ) + + def __init__(self, *args, **kwargs): + dict.__init__(self, *args, **kwargs) + # Check validity of keys + for key in self.keys(): + if key in self.CF_ATTRS_FORBIDDEN: + raise ValueError(f"{key!r} is not a permitted attribute") + + def __eq__(self, other): + # Extend equality to allow for NumPy arrays. 
+ match = set(self.keys()) == set(other.keys()) + if match: + for key, value in self.items(): + match = value == other[key] + try: + match = bool(match) + except ValueError: + match = match.all() + if not match: + break + return match + + def __ne__(self, other): + return not self == other + + def __setitem__(self, key, value): + if key in self.CF_ATTRS_FORBIDDEN: + raise ValueError(f"{key!r} is not a permitted attribute") + dict.__setitem__(self, key, value) + + def update(self, other, **kwargs): + """Perform standard ``dict.update()`` operation.""" + # Gather incoming keys + keys = [] + if hasattr(other, "keys"): + keys += list(other.keys()) + else: + keys += [k for k, v in other] + + keys += list(kwargs.keys()) + + # Check validity of keys + for key in keys: + if key in self.CF_ATTRS_FORBIDDEN: + raise ValueError(f"{key!r} is not a permitted attribute") + + dict.update(self, other, **kwargs) + + +class CFVariableMixin: + _metadata_manager: Any + + @wraps(BaseMetadata.name) + def name( + self, + default: str | None = None, + token: bool | None = None, + ) -> str: + return self._metadata_manager.name(default=default, token=token) + + def rename(self, name: str | None) -> None: + """Change the human-readable name. + + If 'name' is a valid standard name it will assign it to + :attr:`standard_name`, otherwise it will assign it to + :attr:`long_name`. + + """ + try: + self.standard_name = name + self.long_name = None + except ValueError: + self.standard_name = None + self.long_name = str(name) + + # Always clear var_name when renaming. + self.var_name = None + + @property + def standard_name(self) -> str | None: + """The CF Metadata standard name for the object.""" + return self._metadata_manager.standard_name + + @standard_name.setter + def standard_name(self, name: str | None) -> None: + self._metadata_manager.standard_name = _get_valid_standard_name(name) + + @property + def long_name(self) -> str | None: + """The CF Metadata long name for the object.""" + return self._metadata_manager.long_name + + @long_name.setter + def long_name(self, name: str | None) -> None: + self._metadata_manager.long_name = name + + @property + def var_name(self) -> str | None: + """The NetCDF variable name for the object.""" + return self._metadata_manager.var_name + + @var_name.setter + def var_name(self, name: str | None) -> None: + if name is not None: + result = self._metadata_manager.token(name) + if result is None or not name: + emsg = "{!r} is not a valid NetCDF variable name." + raise ValueError(emsg.format(name)) + self._metadata_manager.var_name = name + + @property + def units(self) -> cf_units.Unit: + """The S.I. unit of the object.""" + return self._metadata_manager.units + + @units.setter + def units(self, unit: cf_units.Unit | str | None) -> None: + self._metadata_manager.units = cf_units.as_unit(unit) + + @property + def attributes(self) -> LimitedAttributeDict: + return self._metadata_manager.attributes + + @attributes.setter + def attributes(self, attributes: Mapping) -> None: + self._metadata_manager.attributes = LimitedAttributeDict(attributes or {}) + + @property + def metadata(self): + return self._metadata_manager.values + + @metadata.setter + def metadata(self, metadata): + cls = self._metadata_manager.cls + fields = self._metadata_manager.fields + arg = metadata + + try: + # Try dict-like initialisation... + metadata = cls(**metadata) + except TypeError: + try: + # Try iterator/namedtuple-like initialisation... 
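+ # e.g. the metadata given as a plain iterable of field values, in field + # definition order (a best-guess fallback when dict-style init fails).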
+ metadata = cls(*metadata) + except TypeError: + if hasattr(metadata, "_asdict"): + metadata = metadata._asdict() + + if isinstance(metadata, Mapping): + fields = [field for field in fields if field in metadata] + else: + # Generic iterable/container with no associated keys. + missing = [ + field for field in fields if not hasattr(metadata, field) + ] + + if missing: + missing = ", ".join(map(lambda i: "{!r}".format(i), missing)) + emsg = "Invalid {!r} metadata, require {} to be specified." + raise TypeError(emsg.format(type(arg), missing)) + + for field in fields: + if hasattr(metadata, field): + value = getattr(metadata, field) + else: + value = metadata[field] + + # Ensure to always set state through the individual mixin/container + # setter functions. + setattr(self, field, value) diff --git a/lib/iris/common/resolve.py b/lib/iris/common/resolve.py new file mode 100644 index 0000000000..87ad05791b --- /dev/null +++ b/lib/iris/common/resolve.py @@ -0,0 +1,2601 @@ +# Copyright Iris contributors +# +# This file is part of Iris and is released under the BSD license. +# See LICENSE in the root of the repository for full licensing details. +"""Resolve metadata common between two cubes. + +Provides the infrastructure to support the analysis, identification and +combination of metadata common between two :class:`~iris.cube.Cube` +operands into a single resultant :class:`~iris.cube.Cube`, which will be +auto-transposed, and with the appropriate broadcast shape. + +""" + +from collections import namedtuple +from collections.abc import Iterable +from dataclasses import dataclass +import logging +from typing import Any + +from dask.array.core import broadcast_shapes +import numpy as np + +from ..config import get_logger +from . import LENIENT + +__all__ = ["Resolve"] + + +# Configure the logger. +logger = get_logger(__name__, fmt="[%(funcName)s]") + + +_AuxCoverage = namedtuple( + "_AuxCoverage", + [ + "cube", + "common_items_aux", + "common_items_scalar", + "local_items_aux", + "local_items_scalar", + "dims_common", + "dims_local", + "dims_free", + ], +) + +_CategoryItems = namedtuple( + "_CategoryItems", + ["items_dim", "items_aux", "items_scalar"], +) + +_DimCoverage = namedtuple( + "_DimCoverage", + ["cube", "metadata", "coords", "dims_common", "dims_local", "dims_free"], +) + +_Item = namedtuple("_Item", ["metadata", "coord", "dims"]) + +_PreparedFactory = namedtuple("_PreparedFactory", ["container", "dependencies"]) + + +@dataclass +class _PreparedItem: + metadata: Any + points: Any + bounds: Any + dims: Any + container: Any + mesh: Any = None + location: Any = None + axis: Any = None + + def create_coord(self, metadata): + from iris.mesh import MeshCoord + + if issubclass(self.container, MeshCoord): + # Make a MeshCoord, for which we have mesh/location/axis. + result = MeshCoord( + mesh=self.mesh, + location=self.location, + axis=self.axis, + ) + # Note: in this case we do also have "prepared metadata", but we + # do *not* assign it as we do for an 'ordinary' Coord. + # Instead, MeshCoord name/units/attributes are immutable, and set at + # create time to those of the underlying mesh node coordinate. + # cf https://github.com/SciTools/iris/issues/4670 + + else: + # make a regular coord, for which we have points/bounds/metadata. + result = self.container(self.points, bounds=self.bounds) + # Also assign prepared metadata. 
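+ # (This is the prepared metadata, typically the lenient combination of + # the src and tgt coordinate metadata assembled during resolution.)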
+ result.metadata = metadata + + return result + + +_PreparedMetadata = namedtuple("_PreparedMetadata", ["combined", "src", "tgt"]) + + +class Resolve: + """Resolve the metadata of two cubes into one cube. + + At present, :class:`~iris.common.resolve.Resolve` is used by Iris solely + during cube maths to combine a left-hand :class:`~iris.cube.Cube` + operand and a right-hand :class:`~iris.cube.Cube` operand into a resultant + :class:`~iris.cube.Cube` with common metadata, suitably auto-transposed + dimensions, and an appropriate broadcast shape. + + However, the capability and benefit provided by :class:`~iris.common.resolve.Resolve` + may be exercised as a general means to easily and consistently combine the metadata + of two :class:`~iris.cube.Cube` operands together into a single resultant + :class:`~iris.cube.Cube`. This is highlighted through the following use case + patterns. + + Firstly, creating a ``resolver`` instance with *specific* :class:`~iris.cube.Cube` + operands, and then supplying ``data`` with suitable dimensionality and shape to + create the resultant resolved :class:`~iris.cube.Cube`, e.g., + + .. testsetup:: + + import iris + import numpy as np + from iris.common import Resolve + cube1 = iris.load_cube(iris.sample_data_path("A1B_north_america.nc")) + cube2 = iris.load_cube(iris.sample_data_path("E1_north_america.nc"))[0] + cube2.transpose() + cube3, cube4 = cube1, cube2 + data = np.zeros(cube1.shape) + data1 = data * 10 + data2 = data * 20 + data3 = data * 30 + + .. doctest:: + + >>> print(cube1) + air_temperature / (K) (time: 240; latitude: 37; longitude: 49) + Dimension coordinates: + time x - - + latitude - x - + longitude - - x + Auxiliary coordinates: + forecast_period x - - + Scalar coordinates: + forecast_reference_time 1859-09-01 06:00:00 + height 1.5 m + Cell methods: + 0 time: mean (interval: 6 hour) + Attributes: + Conventions 'CF-1.5' + Model scenario 'A1B' + STASH m01s03i236 + source 'Data from Met Office Unified Model 6.05' + + >>> print(cube2) + air_temperature / (K) (longitude: 49; latitude: 37) + Dimension coordinates: + longitude x - + latitude - x + Scalar coordinates: + forecast_period 10794 hours + forecast_reference_time 1859-09-01 06:00:00 + height 1.5 m + time 1860-06-01 00:00:00, bound=(1859-12-01 00:00:00, 1860-12-01 00:00:00) + Cell methods: + 0 time: mean (interval: 6 hour) + Attributes: + Conventions 'CF-1.5' + Model scenario 'E1' + STASH m01s03i236 + source 'Data from Met Office Unified Model 6.05' + + >>> print(data.shape) + (240, 37, 49) + >>> resolver = Resolve(cube1, cube2) + >>> result = resolver.cube(data) + >>> print(result) + air_temperature / (K) (time: 240; latitude: 37; longitude: 49) + Dimension coordinates: + time x - - + latitude - x - + longitude - - x + Auxiliary coordinates: + forecast_period x - - + Scalar coordinates: + forecast_reference_time 1859-09-01 06:00:00 + height 1.5 m + Cell methods: + 0 time: mean (interval: 6 hour) + Attributes: + Conventions 'CF-1.5' + STASH m01s03i236 + source 'Data from Met Office Unified Model 6.05' + + Secondly, creating an *empty* ``resolver`` instance, that may be called *multiple* + times with *different* :class:`~iris.cube.Cube` operands and *different* ``data``, + e.g., + + .. 
doctest:: + + >>> resolver = Resolve() + >>> result1 = resolver(cube1, cube2).cube(data1) + >>> result2 = resolver(cube3, cube4).cube(data2) + + Lastly, creating a ``resolver`` instance with *specific* :class:`~iris.cube.Cube` + operands, and then supplying *different* ``data`` *multiple* times, e.g., + + >>> payload = (data1, data2, data3) + >>> resolver = Resolve(cube1, cube2) + >>> results = [resolver.cube(data) for data in payload] + + """ # noqa: D214, D406, D407, D410, D411 + + def __init__(self, lhs=None, rhs=None): + """Resolve the cube operands. + + Resolve the provided ``lhs`` :class:`~iris.cube.Cube` operand and + ``rhs`` :class:`~iris.cube.Cube` operand to determine the metadata + that is common between them, and the auto-transposed, broadcast shape + of the resultant :class:`~iris.cube.Cube`. + + This includes the identification of common :class:`~iris.common.metadata.CubeMetadata`, + :class:`~iris.coords.DimCoord`, :class:`~iris.coords.AuxCoord`, and + :class:`~iris.aux_factory.AuxCoordFactory` metadata. + + .. note:: + + Resolving common :class:`~iris.coords.AncillaryVariable` and + :class:`~iris.coords.CellMeasure` metadata is not supported at + this time. (:issue:`3839`) + + .. note:: + + A :class:`~iris.common.resolve.Resolve` instance is **callable**, + allowing two new ``lhs`` and ``rhs`` :class:`~iris.cube.Cube` operands + to be resolved. Note that, :class:`~iris.common.resolve.Resolve` only + supports resolving **two** operands at a time, and no more. + + .. warning:: + + :class:`~iris.common.resolve.Resolve` attempts to preserve commutativity, + but this may not be possible when auto-transposition or extended broadcasting + is involved during the operation. + + + Parameters + ---------- + lhs : :class:`~iris.cube.Cube`, optional + The left-hand-side :class:`~iris.cube.Cube` operand. + rhs : :class:`~iris.cube.Cube`, optional + The right-hand-side :class:`~iris.cube.Cube` operand. + + Examples + -------- + .. doctest:: + + >>> cube1 + <iris 'Cube' of air_temperature / (K) (time: 240; latitude: 37; longitude: 49)> + >>> cube2 + <iris 'Cube' of air_temperature / (K) (longitude: 49; latitude: 37)> + >>> result1 = Resolve(cube1, cube2).cube(data) + >>> result2 = Resolve(cube2, cube1).cube(data) + >>> result1 == result2 + True + + """ + #: The ``lhs`` operand to be resolved into the resultant :class:`~iris.cube.Cube`. + self.lhs_cube = None # set in __call__ + #: The ``rhs`` operand to be resolved into the resultant :class:`~iris.cube.Cube`. + self.rhs_cube = None # set in __call__ + + #: The transposed/reshaped (if required) ``lhs`` :class:`~iris.cube.Cube`, which + #: can be broadcast with the ``rhs`` :class:`~iris.cube.Cube`. + self.lhs_cube_resolved = None + #: The transposed/reshaped (if required) ``rhs`` :class:`~iris.cube.Cube`, which + #: can be broadcast with the ``lhs`` :class:`~iris.cube.Cube`. + self.rhs_cube_resolved = None + + #: Categorised dim, aux and scalar coordinate items for ``lhs`` :class:`~iris.cube.Cube`. + self.lhs_cube_category = None # set in _metadata_resolve + #: Categorised dim, aux and scalar coordinate items for ``rhs`` :class:`~iris.cube.Cube`. + self.rhs_cube_category = None # set in _metadata_resolve + + #: Categorised dim, aux and scalar coordinate items **local** to the + #: ``lhs`` :class:`~iris.cube.Cube` only. + self.lhs_cube_category_local = None # set in _metadata_resolve + #: Categorised dim, aux and scalar coordinate items **local** to the + #: ``rhs`` :class:`~iris.cube.Cube` only.
+ self.rhs_cube_category_local = None # set in _metadata_resolve + #: Categorised dim, aux and scalar coordinate items **common** to both + #: the ``lhs`` :class:`~iris.cube.Cube` and the ``rhs`` :class:`~iris.cube.Cube`. + self.category_common = None # set in _metadata_resolve + + #: Analysis of dim coordinates spanning the ``lhs`` :class:`~iris.cube.Cube`. + self.lhs_cube_dim_coverage = None # set in _metadata_coverage + #: Analysis of aux and scalar coordinates spanning the ``lhs`` :class:`~iris.cube.Cube`. + self.lhs_cube_aux_coverage = None # set in _metadata_coverage + #: Analysis of dim coordinates spanning the ``rhs`` :class:`~iris.cube.Cube`. + self.rhs_cube_dim_coverage = None # set in _metadata_coverage + #: Analysis of aux and scalar coordinates spanning the ``rhs`` :class:`~iris.cube.Cube`. + self.rhs_cube_aux_coverage = None # set in _metadata_coverage + + #: Map **common** metadata from the ``rhs`` :class:`~iris.cube.Cube` to + #: the ``lhs`` :class:`~iris.cube.Cube` if ``lhs-rank`` >= ``rhs-rank``, + #: otherwise map **common** metadata from the ``lhs`` :class:`~iris.cube.Cube` + #: to the ``rhs`` :class:`~iris.cube.Cube`. + self.map_rhs_to_lhs = None # set in __call__ + + #: Mapping of the dimensions between **common** metadata for the :class:`~iris.cube.Cube` + #: operands, where the direction of the mapping is governed by + #: :attr:`~iris.common.resolve.Resolve.map_rhs_to_lhs`. + self.mapping = None # set in _metadata_mapping + + #: Cache containing a list of dim, aux and scalar coordinates prepared + #: and ready for creating and attaching to the resultant resolved + #: :class:`~iris.cube.Cube`. + self.prepared_category = None # set in _metadata_prepare + + #: Cache containing a list of aux factories prepared and ready for + #: creating and attaching to the resultant resolved + #: :class:`~iris.cube.Cube`. + self.prepared_factories = None # set in _metadata_prepare + + # The shape of the resultant resolved cube. + self._broadcast_shape = None # set in _as_compatible_cubes + + if lhs is not None or rhs is not None: + # Attempt to resolve the cube operands. + self(lhs, rhs) + + def __call__(self, lhs, rhs): + """Resolve the cube operands. + + Resolve the ``lhs`` :class:`~iris.cube.Cube` operand and ``rhs`` + :class:`~iris.cube.Cube` operand metadata. + + Involves determining all the common coordinate metadata shared between + the operands, and the metadata that is local to each operand. Given + the common metadata, the broadcast shape of the resultant resolved + :class:`~iris.cube.Cube`, which may be auto-transposed, can be + determined. + + Parameters + ---------- + lhs : :class:`~iris.cube.Cube` + The left-hand-side :class:`~iris.cube.Cube` operand. + rhs : :class:`~iris.cube.Cube` + The right-hand-side :class:`~iris.cube.Cube` operand. + + """ + from iris.cube import Cube + + emsg = "{cls} requires {arg!r} argument to be a 'Cube', got {actual!r}." + clsname = self.__class__.__name__ + + if not isinstance(lhs, Cube): + raise TypeError(emsg.format(cls=clsname, arg="LHS", actual=type(lhs))) + + if not isinstance(rhs, Cube): + raise TypeError(emsg.format(cls=clsname, arg="RHS", actual=type(rhs))) + + # Initialise the operand state. + self.lhs_cube = lhs + self.rhs_cube = rhs + + # Determine the initial direction to map operands. + # This may flip for operands with equal rank, particularly after + # later analysis informs the decision. 
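+ # e.g. resolving a (time, latitude, longitude) cube against a + # (longitude, latitude) cube maps rhs->lhs, as the lhs rank is greater.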
+ if self.lhs_cube.ndim >= self.rhs_cube.ndim: + self.map_rhs_to_lhs = True + else: + self.map_rhs_to_lhs = False + + self._metadata_resolve() + self._metadata_coverage() + + if self._debug: + self._debug_items(self.lhs_cube_category_local, title="LHS local") + self._debug_items(self.rhs_cube_category_local, title="RHS local") + self._debug_items(self.category_common, title="common") + logger.debug(f"map_rhs_to_lhs={self.map_rhs_to_lhs}") + + self._metadata_mapping() + self._metadata_prepare() + + return self + + def _as_compatible_cubes(self): + """Transpose and/or broadcast operands. + + Determine whether the ``src`` and ``tgt`` :class:`~iris.cube.Cube` can + be transposed and/or broadcast successfully together. + + If compatible, the ``_broadcast_shape`` of the resultant resolved cube is + calculated, and the ``_src_cube_resolved`` (transposed/broadcast ``src`` + cube) and ``_tgt_cube_resolved`` (same as the ``tgt`` cube) are + calculated. + + An exception will be raised if the ``src`` and ``tgt`` cannot be + broadcast, even after a suitable transpose has been performed. + + .. note:: + + Requires that **all** ``src`` cube dimensions have been mapped + successfully to an appropriate ``tgt`` cube dimension. + + """ + from iris.cube import Cube + + src_cube = self._src_cube + tgt_cube = self._tgt_cube + + assert src_cube.ndim == len(self.mapping) + + # Use the mapping to calculate the new src cube shape. + new_src_shape = [1] * tgt_cube.ndim + for src_dim, tgt_dim in self.mapping.items(): + new_src_shape[tgt_dim] = src_cube.shape[src_dim] + new_src_shape = tuple(new_src_shape) + dmsg = ( + f"new src {self._src_cube_position} cube shape {new_src_shape}, " + f"actual shape {src_cube.shape}" + ) + logger.debug(dmsg) + + try: + # Determine whether the tgt cube shape and proposed new src + # cube shape will successfully broadcast together. + self._broadcast_shape = broadcast_shapes(tgt_cube.shape, new_src_shape) + except ValueError: + emsg = ( + "Cannot resolve cubes, as a suitable transpose of the " + f"{self._src_cube_position} cube {src_cube.name()!r} " + f"will not broadcast with the {self._tgt_cube_position} cube " + f"{tgt_cube.name()!r}." + ) + raise ValueError(emsg) + + new_src_data = src_cube.core_data().copy() + + # Use the mapping to determine the transpose sequence of + # src dimensions in increasing tgt dimension order. + order = [ + src_dim + for src_dim, tgt_dim in sorted( + self.mapping.items(), key=lambda pair: pair[1] + ) + ] + + # Determine whether a transpose of the src cube is necessary. + if order != sorted(order): + new_src_data = new_src_data.transpose(order) + logger.debug( + f"transpose src {self._src_cube_position} cube with order {order}" + ) + + # Determine whether a reshape is necessary. + if new_src_shape != new_src_data.shape: + new_src_data = new_src_data.reshape(new_src_shape) + logger.debug( + f"reshape src {self._src_cube_position} cube to new shape {new_src_shape}" + ) + + # Create the new src cube. + new_src_cube = Cube(new_src_data) + new_src_cube.metadata = src_cube.metadata + + def add_coord(coord, dim_coord=False): + src_dims = src_cube.coord_dims(coord) + tgt_dims = [self.mapping[src_dim] for src_dim in src_dims] + if dim_coord: + new_src_cube.add_dim_coord(coord, tgt_dims) + else: + new_src_cube.add_aux_coord(coord, tgt_dims) + + # Add the dim coordinates to the new src cube. + for coord in src_cube.dim_coords: + add_coord(coord, dim_coord=True) + + # Add the aux and scalar coordinates to the new src cube. 
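+ # Note: scalar coordinates span no dimensions, so "src_dims" (and hence + # "tgt_dims") is empty, and they are attached without a dimension mapping.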
+ for coord in src_cube.aux_coords: + add_coord(coord) + + # Add the aux factories to the new src cube. + for factory in src_cube.aux_factories: + new_src_cube.add_aux_factory(factory) + + # Set the resolved cubes. + self._src_cube_resolved = new_src_cube + self._tgt_cube_resolved = tgt_cube + + @staticmethod + def _aux_coverage( + cube, + cube_items_aux, + cube_items_scalar, + common_aux_metadata, + common_scalar_metadata, + ): + """Perform auxiliary coordinate coverage. + + Determine the dimensions covered by each of the local and common + auxiliary coordinates of the provided :class:`~iris.cube.Cube`. + + The cube dimensions not covered by any of the auxiliary coordinates are + also determined; these are known as `free` dimensions. + + The scalar coordinates local to the cube are also determined. + + Parameters + ---------- + cube : :class:`~iris.cube.Cube` + The :class:`~iris.cube.Cube` to be analysed for coverage. + cube_items_aux : + The list of associated :class:`~iris.common.resolve._Item` metadata + for each auxiliary coordinate owned by the cube. + cube_items_scalar : + The list of associated :class:`~iris.common.resolve._Item` metadata + for each scalar coordinate owned by the cube. + common_aux_metadata : + The list of common auxiliary coordinate metadata shared by both + the LHS and RHS cube operands being resolved. + common_scalar_metadata : + The list of common scalar coordinate metadata shared by both + the LHS and RHS cube operands being resolved. + + Returns + ------- + :class:`~iris.common.resolve._AuxCoverage` + + """ + common_items_aux = [] + common_items_scalar = [] + local_items_aux = [] + local_items_scalar = [] + dims_common = [] + dims_local = [] + dims_free = set(range(cube.ndim)) + + for item in cube_items_aux: + [dims_free.discard(dim) for dim in item.dims] + + if item.metadata in common_aux_metadata: + common_items_aux.append(item) + dims_common.extend(item.dims) + else: + local_items_aux.append(item) + dims_local.extend(item.dims) + + for item in cube_items_scalar: + if item.metadata in common_scalar_metadata: + common_items_scalar.append(item) + else: + local_items_scalar.append(item) + + return _AuxCoverage( + cube=cube, + common_items_aux=common_items_aux, + common_items_scalar=common_items_scalar, + local_items_aux=local_items_aux, + local_items_scalar=local_items_scalar, + dims_common=sorted(set(dims_common)), + dims_local=sorted(set(dims_local)), + dims_free=sorted(dims_free), + ) + + @staticmethod + def _aux_mapping(src_coverage, tgt_coverage): + """Perform auxiliary coordinate dimension mapping. + + Establish the mapping of dimensions from the ``src`` to ``tgt`` + :class:`~iris.cube.Cube` using the auxiliary coordinate metadata + common between each of the operands. + + The ``src`` to ``tgt`` common auxiliary coordinate mapping is held by + the :attr:`~iris.common.resolve.Resolve.mapping`. + + Parameters + ---------- + src_coverage : + The :class:`~iris.common.resolve._AuxCoverage` of the ``src`` + :class:`~iris.cube.Cube` i.e., map from the common ``src`` + dimensions. + tgt_coverage : + The :class:`~iris.common.resolve._AuxCoverage` of the ``tgt`` + :class:`~iris.cube.Cube` i.e., map to the common ``tgt`` + dimensions. + + Returns + ------- + dict of ``src`` to ``tgt`` dimension mapping. + + """ + mapping = {} + for tgt_item in tgt_coverage.common_items_aux: + # Search for a src aux metadata match.
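+ # e.g. a 'latitude' aux coordinate on the tgt cube is matched against any + # src aux coordinate carrying equal metadata (names, units, attributes, etc.).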
+ tgt_metadata = tgt_item.metadata + src_items = tuple( + filter( + lambda src_item: src_item.metadata == tgt_metadata, + src_coverage.common_items_aux, + ) + ) + if src_items: + # Multiple matching src metadata must cover the same src + # dimensions. + src_dims = src_items[0].dims + if all(map(lambda item: item.dims == src_dims, src_items)): + # Ensure src and tgt have equal rank. + tgt_dims = tgt_item.dims + if len(src_dims) == len(tgt_dims): + for src_dim, tgt_dim in zip(src_dims, tgt_dims): + mapping[src_dim] = tgt_dim + logger.debug(f"{src_dim}->{tgt_dim}") + else: + # This situation can only occur due to a systemic internal + # failure to correctly identify common aux coordinate metadata + # coverage between the cubes. + emsg = ( + "Failed to map common aux coordinate metadata from " + "source cube {!r} to target cube {!r}, using {!r} on " + "target cube dimension{} {}." + ) + raise ValueError( + emsg.format( + src_coverage.cube.name(), + tgt_coverage.cube.name(), + tgt_metadata, + "s" if len(tgt_item.dims) > 1 else "", + tgt_item.dims, + ) + ) + return mapping + + @staticmethod + def _categorise_items(cube): + """Categorise the cube metadata. + + Inspect the provided :class:`~iris.cube.Cube` and group its + coordinates and associated metadata into dimension, auxiliary and + scalar categories. + + Parameters + ---------- + cube : :class:`~iris.cube.Cube` + The :class:`~iris.cube.Cube` that will have its coordinates and + metadata grouped into their associated dimension, auxiliary and + scalar categories. + + Returns + ------- + :class:`~iris.common.resolve._CategoryItems` + + """ + category = _CategoryItems(items_dim=[], items_aux=[], items_scalar=[]) + + # Categorise the dim coordinates of the cube. + for coord in cube.dim_coords: + item = _Item( + metadata=coord.metadata, + coord=coord, + dims=cube.coord_dims(coord), + ) + category.items_dim.append(item) + + # Categorise the aux and scalar coordinates of the cube. + for coord in cube.aux_coords: + dims = cube.coord_dims(coord) + item = _Item(metadata=coord.metadata, coord=coord, dims=dims) + if dims: + category.items_aux.append(item) + else: + category.items_scalar.append(item) + + return category + + @staticmethod + def _create_prepared_item( + coord, + dims, + src_metadata=None, + tgt_metadata=None, + points=None, + bounds=None, + container=None, + ): + """Package metadata in preparation for resolution. + + Convenience method that creates a :class:`~iris.common.resolve._PreparedItem` + containing the data and metadata required to construct and attach a coordinate + to the resultant resolved cube. + + Parameters + ---------- + coord : + The coordinate with the ``points`` and ``bounds`` to be extracted. + dims : int or tuple + The dimensions that the ``coord`` spans on the resulting resolved + :class:`~iris.cube.Cube`. + (Can also be a single dimension number). + src_metadata : optional + The coordinate metadata from the ``src`` :class:`~iris.cube.Cube`. + tgt_metadata : optional + The coordinate metadata from the ``tgt`` :class:`~iris.cube.Cube`. + points : optional + Override points array. When not given, use coord.points. + bounds : optional + Override bounds array. When not given, use coord.bounds. + container : optional + Override coord type (class constructor). + When not given, use type(coord). + + Returns + ------- + :class:`~iris.common.resolve._PreparedItem`. + + Notes + ----- + .. note:: + + If container or type(coord) is DimCoord/AuxCoord (i.e. not + MeshCoord), then points+bounds define the built AuxCoord/DimCoord. 
+ These points+bounds come either from those args, or the 'coord'. + Alternatively, when container or type(coord) is MeshCoord, then + points==bounds==None and the prepared item contains + mesh/location/axis properties for the resulting MeshCoord. + These don't have override args: they *always* come from 'coord'. + + """ + if not isinstance(dims, Iterable): + dims = (dims,) + + if src_metadata is not None and tgt_metadata is not None: + combined = src_metadata.combine(tgt_metadata) + else: + combined = src_metadata or tgt_metadata + prepared_metadata = _PreparedMetadata( + combined=combined, src=src_metadata, tgt=tgt_metadata + ) + + if container is None: + container = type(coord) + + from iris.mesh import MeshCoord + + if issubclass(container, MeshCoord): + # Build a prepared-item to make a MeshCoord. + # This case does *NOT* use points + bounds, so alternatives to the + # coord content should not have been specified by the caller. + assert points is None and bounds is None + mesh = coord.mesh + location = coord.location + axis = coord.axis + + else: + # Build a prepared-item to make a DimCoord or AuxCoord. + + # mesh/location/axis are not used. + mesh = None + location = None + axis = None + + # points + bounds default to those from the coordinate, but + # alternative values may be specified. + if points is None: + points = coord.points + bounds = coord.bounds + # 'ELSE' points was passed: both points+bounds come from the args + + # Always *copy* points+bounds, to avoid any possible direct (shared) + # references to existing coord arrays. + points = points.copy() + if bounds is not None: + bounds = bounds.copy() + + result = _PreparedItem( + metadata=prepared_metadata, + dims=dims, + points=points, + bounds=bounds, + mesh=mesh, + location=location, + axis=axis, + container=container, + ) + return result + + @property + def _debug(self): + result = False + level = logger.getEffectiveLevel() + if level != logging.NOTSET: + result = logging.DEBUG >= level + return result + + @staticmethod + def _debug_items(items, title=None): + def _show(items, heading): + logger.debug(f"{title}{heading}:") + for item in items: + dmsg = f"metadata={item.metadata}, dims={item.dims}, bounds={item.coord.has_bounds()}" + logger.debug(dmsg) + + title = f"{title} " if title else "" + _show(items.items_dim, "dim") + _show(items.items_aux, "aux") + _show(items.items_scalar, "scalar") + + @staticmethod + def _dim_coverage(cube, cube_items_dim, common_dim_metadata): + """Perform dimension coordinate coverage. + + Determine the dimensions covered by each of the local and common + dimension coordinates of the provided :class:`~iris.cube.Cube`. + + The cube dimensions not covered by any of the dimension coordinates are + also determined; these are known as `free` dimensions. + + Parameters + ---------- + cube : :class:`~iris.cube.Cube` + The :class:`~iris.cube.Cube` to be analysed for coverage. + cube_items_dim : + The list of associated :class:`~iris.common.resolve._Item` metadata + for each dimension coordinate owned by the cube. + common_dim_metadata : + The list of common dimension coordinate metadata shared by both + the LHS and RHS cube operands being resolved.
+ + Returns + ------- + :class:`~iris.common.resolve._DimCoverage` + + """ + ndim = cube.ndim + metadata = [None] * ndim + coords = [None] * ndim + dims_common = [] + dims_local = [] + dims_free = set(range(ndim)) + + for item in cube_items_dim: + (dim,) = item.dims + dims_free.discard(dim) + metadata[dim] = item.metadata + coords[dim] = item.coord + if item.metadata in common_dim_metadata: + dims_common.append(dim) + else: + dims_local.append(dim) + + return _DimCoverage( + cube=cube, + metadata=metadata, + coords=coords, + dims_common=sorted(dims_common), + dims_local=sorted(dims_local), + dims_free=sorted(dims_free), + ) + + @staticmethod + def _dim_mapping(src_coverage, tgt_coverage): + """Perform dimension coordinate dimension mapping. + + Establish the mapping of dimensions from the ``src`` to ``tgt`` + :class:`~iris.cube.Cube` using the dimension coordinate metadata + common between each of the operands. + + The ``src`` to ``tgt`` common dimension coordinate mapping is held by + the :attr:`~iris.common.resolve.Resolve.mapping`. + + Parameters + ---------- + src_coverage : + The :class:`~iris.common.resolve._DimCoverage` of the ``src`` + :class:`~iris.cube.Cube` i.e., map from the common ``src`` + dimensions. + tgt_coverage : + The :class:`~iris.common.resolve._DimCoverage` of the ``tgt`` + :class:`~iris.cube.Cube` i.e., map to the common ``tgt`` + dimensions. + + Returns + ------- + dict of ``src`` to ``tgt`` dimension mapping. + + """ + mapping = {} + for tgt_dim in tgt_coverage.dims_common: + # Search for a src dim metadata match. + tgt_metadata = tgt_coverage.metadata[tgt_dim] + try: + src_dim = src_coverage.metadata.index(tgt_metadata) + mapping[src_dim] = tgt_dim + logger.debug(f"{src_dim}->{tgt_dim}") + except ValueError: + # This exception can only occur due to a systemic internal + # failure to correctly identify common dim coordinate metadata + # coverage between the cubes. + emsg = ( + "Failed to map common dim coordinate metadata from " + "source cube {!r} to target cube {!r}, using {!r} on " + "target cube dimension {}." + ) + raise ValueError( + emsg.format( + src_coverage.cube.name(), + tgt_coverage.cube.name(), + tgt_metadata, + (tgt_dim,), + ) + ) + return mapping + + def _free_mapping( + self, + src_dim_coverage, + tgt_dim_coverage, + src_aux_coverage, + tgt_aux_coverage, + ): + """Associate free dimensions to covered dimensions. + + Attempt to update the :attr:`~iris.common.resolve.Resolve.mapping` with + ``src`` to ``tgt`` :class:`~iris.cube.Cube` mappings from unmapped ``src`` + dimensions that are free from coordinate metadata coverage to ``tgt`` + dimensions that have local metadata coverage (i.e., not common between + the ``src`` and ``tgt``) or dimensions that are free from coordinate + metadata coverage. + + If the ``src`` :class:`~iris.cube.Cube` does not have any free dimensions, + an attempt is made to map unmapped ``tgt`` dimensions that have local metadata + coverage to ``src`` dimensions that are free from coordinate metadata + coverage. + + An exception will be raised if there are any ``src`` :class:`~iris.cube.Cube` + dimensions not mapped to an associated ``tgt`` dimension. + + Parameters + ---------- + src_dim_coverage : + The :class:`~iris.common.resolve._DimCoverage` of the ``src`` + :class:`~iris.cube.Cube`. + tgt_dim_coverage : + The :class:`~iris.common.resolve._DimCoverage` of the ``tgt`` + :class:`~iris.cube.Cube`. + src_aux_coverage : + The :class:`~iris.common.resolve._AuxCoverage` of the ``src`` + :class:`~iris.cube.Cube`.
+ tgt_aux_coverage : + The :class:`~iris.common.resolve._AuxCoverage` of the ``tgt`` + :class:`~iris.cube.Cube`. + + Notes + ----- + .. note:: + + All unmapped dimensions with an extent > 1 are mapped before those + with an extent of 1, as such dimensions cannot be broadcast. It + is important to map specific non-broadcastable dimensions before + generic broadcastable dimensions, otherwise we risk failing to + map all the src dimensions, as a generic broadcastable src dimension may + already have been mapped to the only tgt dimension that a specific + non-broadcastable dimension can be mapped to. + + .. note:: + + A local dimension cannot be mapped to another local dimension, + by definition, otherwise this dimension would be classed as a + common dimension. + + """ + src_cube = src_dim_coverage.cube + tgt_cube = tgt_dim_coverage.cube + src_ndim = src_cube.ndim + tgt_ndim = tgt_cube.ndim + + # mapping src to tgt, involving free dimensions on either the src/tgt. + free_mapping = {} + + # Determine the src/tgt dimensions that are not mapped, + # and not covered by any metadata. + src_free = set(src_dim_coverage.dims_free) & set(src_aux_coverage.dims_free) + tgt_free = set(tgt_dim_coverage.dims_free) & set(tgt_aux_coverage.dims_free) + + if src_free or tgt_free: + # Determine the src/tgt dimensions that are not mapped. + src_unmapped = set(range(src_ndim)) - set(self.mapping) + tgt_unmapped = set(range(tgt_ndim)) - set(self.mapping.values()) + + # Determine the src/tgt dimensions that are not mapped, + # but are covered by a src/tgt local coordinate. + src_unmapped_local = src_unmapped - src_free + tgt_unmapped_local = tgt_unmapped - tgt_free + + src_shape = src_cube.shape + tgt_shape = tgt_cube.shape + src_max, tgt_max = max(src_shape), max(tgt_shape) + + def _assign_mapping(extent, unmapped_local_items, free_items=None): + result = None + if free_items is None: + free_items = [] + if extent == 1: + # Map to the first available unmapped local dimension or + # the first available free dimension. + # Dimension shape doesn't matter here as the extent is 1, + # therefore broadcasting will take care of any discrepancy + # between src and tgt dimension extent. + if unmapped_local_items: + result, _ = unmapped_local_items.pop(0) + elif free_items: + result, _ = free_items.pop(0) + else: + + def _filter(items): + return list(filter(lambda item: item[1] == extent, items)) + + def _pop(item, items): + dim, _ = item + index = items.index(item) + items.pop(index) + return dim + + items = _filter(unmapped_local_items) + if items: + result = _pop(items[0], unmapped_local_items) + else: + items = _filter(free_items) + if items: + result = _pop(items[0], free_items) + return result + + if src_free: + # Attempt to map src free dimensions to tgt unmapped local or free dimensions. + tgt_unmapped_local_items = [ + (dim, tgt_shape[dim]) for dim in tgt_unmapped_local + ] + tgt_free_items = [(dim, tgt_shape[dim]) for dim in tgt_free] + # Sort by decreasing src dimension extent and increasing src dimension + # as we want broadcast src dimensions to be mapped last. + src_key_func = lambda dim: (src_max - src_shape[dim], dim) + + for src_dim in sorted(src_free, key=src_key_func): + tgt_dim = _assign_mapping( + src_shape[src_dim], + tgt_unmapped_local_items, + tgt_free_items, + ) + if tgt_dim is None: + # Failed to map the src free dimension + # to a suitable tgt local/free dimension.
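+ # (Any src dimensions still unmapped after this loop trigger the + # ValueError raised below, once the free-mapping attempt completes.)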
+ dmsg = ( + f"failed to map src free dimension ({src_dim},) from " + f"{self._src_cube_position} cube {src_cube.name()!r} to " + f"{self._tgt_cube_position} cube {tgt_cube.name()!r}." + ) + logger.debug(dmsg) + break + free_mapping[src_dim] = tgt_dim + else: + # Attempt to map tgt free dimensions to src unmapped local dimensions. + src_unmapped_local_items = [ + (dim, src_shape[dim]) for dim in src_unmapped_local + ] + # Sort by decreasing tgt dimension extent and increasing tgt dimension + # as we want broadcast tgt dimensions to be mapped last. + tgt_key_func = lambda dim: (tgt_max - tgt_shape[dim], dim) + + for tgt_dim in sorted(tgt_free, key=tgt_key_func): + src_dim = _assign_mapping( + tgt_shape[tgt_dim], src_unmapped_local_items + ) + if src_dim is not None: + free_mapping[src_dim] = tgt_dim + if not src_unmapped_local_items: + # There are no more src unmapped local dimensions. + break + + # Determine whether there are still unmapped src dimensions. + src_unmapped = set(range(src_cube.ndim)) - set(self.mapping) - set(free_mapping) + + if src_unmapped: + plural = "s" if len(src_unmapped) > 1 else "" + emsg = ( + "Insufficient matching coordinate metadata to resolve cubes, " + f"cannot map dimension{plural} {tuple(sorted(src_unmapped))} " + f"of the {self._src_cube_position} cube {src_cube.name()!r} " + f"to the {self._tgt_cube_position} cube {tgt_cube.name()!r}." + ) + raise ValueError(emsg) + + # Update the mapping. + self.mapping.update(free_mapping) + logger.debug(f"mapping free dimensions gives, mapping={self.mapping}") + + def _metadata_coverage(self): + """Determine free and covered dimensions. + + Using the pre-categorised metadata of the cubes, determine the dimensions + covered by their associated dimension and auxiliary coordinates, and which + dimensions are free of metadata coverage. + + This coverage analysis clarifies how the dimensions covered by common + metadata are related, thus establishing a dimensional mapping between + the cubes. It also identifies the dimensions covered by metadata that + is local to each cube, and indeed which dimensions are free of metadata. + + """ + # Determine the common dim coordinate metadata coverage. + common_dim_metadata = [item.metadata for item in self.category_common.items_dim] + + self.lhs_cube_dim_coverage = self._dim_coverage( + self.lhs_cube, + self.lhs_cube_category.items_dim, + common_dim_metadata, + ) + self.rhs_cube_dim_coverage = self._dim_coverage( + self.rhs_cube, + self.rhs_cube_category.items_dim, + common_dim_metadata, + ) + + # Determine the common aux and scalar coordinate metadata coverage. + common_aux_metadata = [item.metadata for item in self.category_common.items_aux] + common_scalar_metadata = [ + item.metadata for item in self.category_common.items_scalar + ] + + self.lhs_cube_aux_coverage = self._aux_coverage( + self.lhs_cube, + self.lhs_cube_category.items_aux, + self.lhs_cube_category.items_scalar, + common_aux_metadata, + common_scalar_metadata, + ) + self.rhs_cube_aux_coverage = self._aux_coverage( + self.rhs_cube, + self.rhs_cube_category.items_aux, + self.rhs_cube_category.items_scalar, + common_aux_metadata, + common_scalar_metadata, + ) + + def _metadata_mapping(self): + """Identify equivalent dimensions using metadata. + + Ensure that each ``src`` :class:`~iris.cube.Cube` dimension is mapped to an + associated ``tgt`` :class:`~iris.cube.Cube` dimension using the common dim + and aux coordinate metadata. 
+ + If the common metadata does not result in a full mapping of ``src`` to ``tgt`` + dimensions then free dimensions are analysed to determine whether the mapping + can be completed. + + Once the ``src`` has been mapped to the ``tgt``, the cubes are checked to + ensure that they will successfully broadcast, and the ``src`` + :class:`~iris.cube.Cube` is transposed appropriately, if necessary. + + The :attr:`~iris.common.resolve.Resolve._broadcast_shape` is set, along with + the :attr:`~iris.common.resolve.Resolve._src_cube_resolved` and + :attr:`~iris.common.resolve.Resolve._tgt_cube_resolved`, + which are the broadcast/transposed ``src`` and ``tgt``. + + .. note:: + + An exception will be raised if a ``src`` dimension cannot be mapped to + a ``tgt`` dimension. + + .. note:: + + An exception will be raised if the fully mapped ``src`` + :class:`~iris.cube.Cube` cannot be broadcast or transposed with the + ``tgt`` :class:`~iris.cube.Cube`. + + .. note:: + + The ``src`` and ``tgt`` may be swapped in the case where they both have + equal dimensionality and the ``tgt`` does not have the same shape as the + resolved broadcast shape (while the ``src`` does), or the ``tgt`` has more + free dimensions than the ``src``. + + """ + # Initialise the state. + self.mapping = {} + + # Map RHS cube to LHS cube, or smaller to larger cube rank. + if self.map_rhs_to_lhs: + src_cube = self.rhs_cube + src_dim_coverage = self.rhs_cube_dim_coverage + src_aux_coverage = self.rhs_cube_aux_coverage + tgt_cube = self.lhs_cube + tgt_dim_coverage = self.lhs_cube_dim_coverage + tgt_aux_coverage = self.lhs_cube_aux_coverage + else: + src_cube = self.lhs_cube + src_dim_coverage = self.lhs_cube_dim_coverage + src_aux_coverage = self.lhs_cube_aux_coverage + tgt_cube = self.rhs_cube + tgt_dim_coverage = self.rhs_cube_dim_coverage + tgt_aux_coverage = self.rhs_cube_aux_coverage + + # Use the dim coordinates to fully map the + # src cube dimensions to the tgt cube dimensions. + self.mapping.update(self._dim_mapping(src_dim_coverage, tgt_dim_coverage)) + logger.debug(f"mapping common dim coordinates gives, mapping={self.mapping}") + + # If necessary, use the aux coordinates to fully map the + # src cube dimensions to the tgt cube dimensions. + if not self.mapped: + self.mapping.update(self._aux_mapping(src_aux_coverage, tgt_aux_coverage)) + logger.debug(f"mapping common aux coordinates, mapping={self.mapping}") + + if not self.mapped: + # Attempt to complete the mapping using src/tgt free dimensions. + # Note that, this may not be possible and result in an exception. + self._free_mapping( + src_dim_coverage, + tgt_dim_coverage, + src_aux_coverage, + tgt_aux_coverage, + ) + + # Attempt to transpose/reshape the cubes into compatible broadcast shapes. + # Note that, this may not be possible and result in an exception. + self._as_compatible_cubes() + + # Given the resultant broadcast shape, determine whether the + # mapping needs to be reversed. + # Only applies to equal src/tgt dimensionality. + broadcast_flip = ( + src_cube.ndim == tgt_cube.ndim + and self._tgt_cube_resolved.shape != self.shape + and self._src_cube_resolved.shape == self.shape + ) + + # Given the number of free dimensions, determine whether the + # mapping needs to be reversed. + # Only applies to equal src/tgt dimensionality.
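+ # Broadly, with equal ranks the operand with fewer free dimensions is + # preferred as the tgt, so the richer metadata survives into the result.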
+ src_free = set(src_dim_coverage.dims_free) & set(src_aux_coverage.dims_free) + tgt_free = set(tgt_dim_coverage.dims_free) & set(tgt_aux_coverage.dims_free) + free_flip = src_cube.ndim == tgt_cube.ndim and len(tgt_free) > len(src_free) + + # Reverse the mapping direction. + if broadcast_flip or free_flip: + flip_mapping = { + tgt_dim: src_dim for src_dim, tgt_dim in self.mapping.items() + } + self.map_rhs_to_lhs = not self.map_rhs_to_lhs + dmsg = ( + f"reversing the mapping from {self.mapping} to {flip_mapping}, " + f"now map_rhs_to_lhs={self.map_rhs_to_lhs}" + ) + logger.debug(dmsg) + self.mapping = flip_mapping + # Now require to transpose/reshape the cubes into compatible + # broadcast cubes again, due to possible non-commutative behaviour + # after reversing the mapping direction. + self._as_compatible_cubes() + + def _metadata_prepare(self): + """Consolidate metadata for resolved cube. + + Populate the :attr:`~iris.common.resolve.Resolve.prepared_category` and + :attr:`~iris.common.resolve.Resolve.prepared_factories` with the necessary metadata to be constructed + and attached to the resulting resolved :class:`~iris.cube.Cube`. + + """ + # Initialise the state. + self.prepared_category = _CategoryItems( + items_dim=[], items_aux=[], items_scalar=[] + ) + self.prepared_factories = [] + + # Map RHS cube to LHS cube, or smaller to larger cube rank. + if self.map_rhs_to_lhs: + src_cube = self.rhs_cube + src_category_local = self.rhs_cube_category_local + src_dim_coverage = self.rhs_cube_dim_coverage + src_aux_coverage = self.rhs_cube_aux_coverage + tgt_cube = self.lhs_cube + tgt_category_local = self.lhs_cube_category_local + tgt_dim_coverage = self.lhs_cube_dim_coverage + tgt_aux_coverage = self.lhs_cube_aux_coverage + else: + src_cube = self.lhs_cube + src_category_local = self.lhs_cube_category_local + src_dim_coverage = self.lhs_cube_dim_coverage + src_aux_coverage = self.lhs_cube_aux_coverage + tgt_cube = self.rhs_cube + tgt_category_local = self.rhs_cube_category_local + tgt_dim_coverage = self.rhs_cube_dim_coverage + tgt_aux_coverage = self.rhs_cube_aux_coverage + + # Determine the resultant cube dim coordinate/s. + self._prepare_common_dim_payload(src_dim_coverage, tgt_dim_coverage) + + # Determine the resultant cube aux coordinate/s. + self._prepare_common_aux_payload( + src_aux_coverage.common_items_aux, # input + tgt_aux_coverage.common_items_aux, # input + self.prepared_category.items_aux, # output + ) + + # Determine the resultant cube scalar coordinate/s. + self._prepare_common_aux_payload( + src_aux_coverage.common_items_scalar, # input + tgt_aux_coverage.common_items_scalar, # input + self.prepared_category.items_scalar, # output + ignore_mismatch=True, + ) + + self._prepare_local_payload( + src_dim_coverage, + src_aux_coverage, + tgt_dim_coverage, + tgt_aux_coverage, + ) + + self._prepare_factory_payload(tgt_cube, tgt_category_local, from_src=False) + self._prepare_factory_payload(src_cube, src_category_local) + + def _metadata_resolve(self): + """Categorise the coordinate metadata. + + Categorise the coordinate metadata of the cubes into three distinct + groups; metadata from coordinates only available (local) on the LHS + cube, metadata from coordinates only available (local) on the RHS + cube, and metadata from coordinates common to both the LHS and RHS + cubes. + + This is only applicable to coordinates that are members of the + 'aux_coords' or 'dim_coords' of the participating cubes. 
+ + """ + # Determine the cube dim, aux and scalar coordinate items + # for each individual cube. + self.lhs_cube_category = self._categorise_items(self.lhs_cube) + self.rhs_cube_category = self._categorise_items(self.rhs_cube) + + # Categorised dim, aux and scalar coordinate items local to LHS cube only. + self.lhs_cube_category_local = _CategoryItems( + items_dim=[], items_aux=[], items_scalar=[] + ) + # Categorised dim, aux and scalar coordinate items local to RHS cube only. + self.rhs_cube_category_local = _CategoryItems( + items_dim=[], items_aux=[], items_scalar=[] + ) + # Categorised dim, aux and scalar coordinate items common to both + # LHS cube and RHS cube. + self.category_common = _CategoryItems( + items_dim=[], items_aux=[], items_scalar=[] + ) + + def _categorise( + lhs_items, + rhs_items, + lhs_local_items, + rhs_local_items, + common_items, + ): + rhs_items_metadata = [item.metadata for item in rhs_items] + # Track common metadata here as a temporary convenience. + common_metadata = [] + + # Determine items local to the lhs, and shared items + # common to both lhs and rhs. + for item in lhs_items: + metadata = item.metadata + if metadata in rhs_items_metadata: + # The metadata is common between lhs and rhs. + if metadata not in common_metadata: + common_items.append(item) + common_metadata.append(metadata) + else: + # The metadata is local to the lhs. + lhs_local_items.append(item) + + # Determine items local to the rhs. + for item in rhs_items: + if item.metadata not in common_metadata: + rhs_local_items.append(item) + + # Determine local and common dim category items. + _categorise( + self.lhs_cube_category.items_dim, # input + self.rhs_cube_category.items_dim, # input + self.lhs_cube_category_local.items_dim, # output + self.rhs_cube_category_local.items_dim, # output + self.category_common.items_dim, # output + ) + + # Determine local and common aux category items. + _categorise( + self.lhs_cube_category.items_aux, # input + self.rhs_cube_category.items_aux, # input + self.lhs_cube_category_local.items_aux, # output + self.rhs_cube_category_local.items_aux, # output + self.category_common.items_aux, # output + ) + + # Determine local and common scalar category items. + _categorise( + self.lhs_cube_category.items_scalar, # input + self.rhs_cube_category.items_scalar, # input + self.lhs_cube_category_local.items_scalar, # output + self.rhs_cube_category_local.items_scalar, # output + self.category_common.items_scalar, # output + ) + + # Sort the resultant categories by metadata name for consistency, + # in-place. + categories = ( + self.lhs_cube_category, + self.rhs_cube_category, + self.lhs_cube_category_local, + self.rhs_cube_category_local, + self.category_common, + ) + key_func = lambda item: item.metadata.name() + + for category in categories: + category.items_dim.sort(key=key_func) + category.items_aux.sort(key=key_func) + category.items_scalar.sort(key=key_func) + + def _prepare_common_aux_payload( + self, + src_common_items, + tgt_common_items, + prepared_items, + ignore_mismatch=None, + ): + """Consolidate common auxiliary coordinate metadata. + + Populate the ``prepared_items`` with a :class:`~iris.common.resolve._PreparedItem` containing + the necessary metadata for each auxiliary coordinate to be constructed and attached to the + resulting resolved :class:`~iris.cube.Cube`. + + .. note:: + + For mixed ``src`` and ``tgt`` coordinate types with matching metadata, an + :class:`~iris.coords.AuxCoord` will be nominated for construction. 
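The nomination rule in the note above reduces to a single type comparison, made explicit further below in the payload code; a small sketch using the real Iris coordinate classes:

```python
from iris.coords import AuxCoord, DimCoord

# Identical container types are preserved; mixed types are downcast
# to the more general AuxCoord.
for src_type, tgt_type in [(DimCoord, DimCoord), (DimCoord, AuxCoord)]:
    container = src_type if src_type is tgt_type else AuxCoord
    print(container.__name__)  # -> DimCoord, then AuxCoord
```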
+
+        Parameters
+        ----------
+        src_common_items :
+            The list of :attr:`~iris.common.resolve._AuxCoverage.common_items_aux` metadata
+            for the ``src`` :class:`~iris.cube.Cube`.
+        tgt_common_items :
+            The list of :attr:`~iris.common.resolve._AuxCoverage.common_items_aux` metadata
+            for the ``tgt`` :class:`~iris.cube.Cube`.
+        prepared_items :
+            The list of :class:`~iris.common.resolve._PreparedItem` metadata that will be used
+            to construct the auxiliary coordinates that will be attached to the resulting
+            resolved :class:`~iris.cube.Cube`.
+        ignore_mismatch : bool, optional
+            When ``False``, an exception will be raised if a difference is detected between corresponding
+            ``src`` and ``tgt`` coordinate ``points`` and/or ``bounds``.
+            When ``True``, the mismatch is ignored, i.e., the coordinate will not be constructed and
+            added to the resulting resolved :class:`~iris.cube.Cube`.
+            Defaults to ``False``.
+
+        """
+        from iris.coords import AuxCoord
+
+        if ignore_mismatch is None:
+            # Configure ability to ignore coordinate points/bounds
+            # mismatches between common items.
+            ignore_mismatch = False
+
+        for src_item in src_common_items:
+            src_metadata = src_item.metadata
+            tgt_items = tuple(
+                filter(
+                    lambda tgt_item: tgt_item.metadata == src_metadata,
+                    tgt_common_items,
+                )
+            )
+            if not tgt_items:
+                dmsg = (
+                    f"ignoring src {self._src_cube_position} cube aux coordinate "
+                    f"{src_metadata}, does not match any common tgt "
+                    f"{self._tgt_cube_position} cube aux coordinate metadata"
+                )
+                logger.debug(dmsg)
+            elif len(tgt_items) > 1:
+                dmsg = (
+                    f"ignoring src {self._src_cube_position} cube aux coordinate "
+                    f"{src_metadata}, matches multiple [{len(tgt_items)}] common "
+                    f"tgt {self._tgt_cube_position} cube aux coordinate metadata"
+                )
+                logger.debug(dmsg)
+            else:
+                (tgt_item,) = tgt_items
+                src_coord = src_item.coord
+                tgt_coord = tgt_item.coord
+
+                prepared_item = None
+                src_is_mesh, tgt_is_mesh = [
+                    hasattr(coord, "mesh") for coord in (src_coord, tgt_coord)
+                ]
+                if src_is_mesh and tgt_is_mesh:
+                    # MeshCoords are a bit "special" ...
+                    # In this case, we may need to produce an alternative form
+                    # to the 'ordinary' _PreparedItem.
+                    # However, this only works if they have identical meshes.
+                    if src_coord == tgt_coord:
+                        prepared_item = self._create_prepared_item(
+                            src_coord,
+                            tgt_item.dims,
+                            src_metadata=src_metadata,
+                            tgt_metadata=tgt_item.metadata,
+                        )
+                    else:
+                        emsg = (
+                            f"Mesh coordinate {src_coord.name()!r} does not match between the "
+                            f"LHS cube {self.lhs_cube.name()!r} and "
+                            f"RHS cube {self.rhs_cube.name()!r}."
+                        )
+                        raise ValueError(emsg)
+
+                if prepared_item is None:
+                    # Make a "normal" _PreparedItem, which is specified using
+                    # points + bounds arrays.
+                    # First, convert any unmatched MeshCoords to AuxCoords.
+                    if src_is_mesh:
+                        src_coord = AuxCoord.from_coord(src_coord)
+                    if tgt_is_mesh:
+                        tgt_coord = AuxCoord.from_coord(tgt_coord)
+                    points, bounds = self._prepare_points_and_bounds(
+                        src_coord,
+                        tgt_coord,
+                        src_item.dims,
+                        tgt_item.dims,
+                        ignore_mismatch=ignore_mismatch,
+                    )
+                    if points is not None:
+                        src_type = type(src_coord)
+                        tgt_type = type(tgt_coord)
+                        # Downcast to aux if there are mixed container types.
+ container = src_type if src_type is tgt_type else AuxCoord + prepared_item = self._create_prepared_item( + src_coord, + tgt_item.dims, + src_metadata=src_metadata, + tgt_metadata=tgt_item.metadata, + points=points, + bounds=bounds, + container=container, + ) + + if prepared_item is not None: + prepared_items.append(prepared_item) + + def _prepare_common_dim_payload( + self, src_coverage, tgt_coverage, ignore_mismatch=None + ): + """Consolidate common dimension coordinate metadata. + + Populate the ``items_dim`` member of :attr:`~iris.common.resolve.Resolve.prepared_category_items` + with a :class:`~iris.common.resolve._PreparedItem` containing the necessary metadata for + each :class:`~iris.coords.DimCoord` to be constructed and attached to the resulting resolved + :class:`~iris.cube.Cube`. + + Parameters + ---------- + src_coverage : + The :class:`~iris.common.resolve._DimCoverage` metadata for the + ``src`` :class:`~iris.cube.Cube`. + tgt_coverage : + The :class:`~iris.common.resolve._DimCoverage` metadata for the + ``tgt`` :class:`~iris.cube.Cube`. + ignore_mismatch : optional + When ``False``, an exception will be raised if a difference is detected between corresponding + ``src`` and ``tgt`` :class:`~iris.coords.DimCoord` ``points`` and/or ``bounds``. + When ``True``, the coverage metadata is ignored i.e., a :class:`~iris.coords.DimCoord` will not + be constructed and added to the resulting resolved :class:`~iris.cube.Cube`. + Defaults to ``False``. + + """ + from iris.coords import DimCoord + + if ignore_mismatch is None: + # Configure ability to ignore coordinate points/bounds + # mismatches between common items. + ignore_mismatch = False + + for src_dim in src_coverage.dims_common: + src_metadata = src_coverage.metadata[src_dim] + src_coord = src_coverage.coords[src_dim] + + tgt_dim = self.mapping[src_dim] + tgt_metadata = tgt_coverage.metadata[tgt_dim] + tgt_coord = tgt_coverage.coords[tgt_dim] + + points, bounds = self._prepare_points_and_bounds( + src_coord, + tgt_coord, + src_dim, + tgt_dim, + ignore_mismatch=ignore_mismatch, + ) + + if points is not None: + prepared_item = self._create_prepared_item( + src_coord, + tgt_dim, + src_metadata=src_metadata, + tgt_metadata=tgt_metadata, + points=points, + bounds=bounds, + container=DimCoord, + ) + self.prepared_category.items_dim.append(prepared_item) + + def _get_prepared_item( + self, metadata, category_local, from_src=True, from_local=False + ): + """Find the :attr:`~iris.common.resolve._PreparedItem`. + + Find the :attr:`~iris.common.resolve._PreparedItem` from the + :attr:`~iris.common.resolve.Resolve.prepared_category` that matches the provided ``metadata``. + + Alternatively, the ``category_local`` is searched to find a :class:`~iris.common.resolve._Item` + with matching ``metadata`` from either the local ``src`` or ``tgt`` :class:`~iris.cube.Cube`. + If a match is found, then a new `~iris.common.resolve._PreparedItem` is created and added to + :attr:`~iris.common.resolve.Resolve.prepared_category` and returned. See ``from_local``. + + Parameters + ---------- + metadata : + The target metadata of the prepared (or local) item to retrieve. + category_local : + The :class:`~iris.common.resolve._CategoryItems` containing the + local metadata of either the ``src`` or ``tgt`` :class:`~iris.cube.Cube`. + See ``from_local``. + from_src : bool, default=True + Boolean stating whether the ``metadata`` is from the ``src`` (``True``) + or ``tgt`` :class:`~iris.cube.Cube`. + Defaults to ``True``. 
+ from_local : bool, default=False + Boolean controlling whether the ``metadata`` is used to search the + ``category_local`` (``True``) or the :attr:`~iris.common.resolve.Resolve.prepared_category`. + Defaults to ``False``. + + Returns + ------- + :class:`~iris.common.resolve._PreparedItem` + The :class:`~iris.common.resolve._PreparedItem` matching the provided ``metadata``. + + """ + result = None + + if from_local: + category = category_local + match = lambda item: item.metadata == metadata + else: + category = self.prepared_category + if from_src: + match = lambda item: item.metadata.src == metadata + else: + match = lambda item: item.metadata.tgt == metadata + + for member in category._fields: + category_items = getattr(category, member) + matched_items = tuple(filter(match, category_items)) + if matched_items: + if len(matched_items) > 1: + dmsg = ( + f"ignoring factory dependency {metadata}, multiple {'src' if from_src else 'tgt'} " + f"{'local' if from_local else 'prepared'} metadata matches" + ) + logger.debug(dmsg) + else: + (item,) = matched_items + if from_local: + src = tgt = None + if from_src: + src = item.metadata + dims = tuple([self.mapping[dim] for dim in item.dims]) + else: + tgt = item.metadata + dims = item.dims + result = self._create_prepared_item( + item.coord, + dims, + src_metadata=src, + tgt_metadata=tgt, + ) + getattr(self.prepared_category, member).append(result) + else: + result = item + break + return result + + def _prepare_factory_payload(self, cube, category_local, from_src=True): + """Consolidate common factory metadata. + + Populate the :attr:`~iris.common.resolve.Resolve.prepared_factories` with a + :class:`~iris.common.resolve._PreparedFactory` containing the necessary + metadata for each ``src`` and/or ``tgt`` auxiliary factory to be constructed + and attached to the resulting resolved :class:`~iris.cube.Cube`. + + .. note:: + + The required dependencies of an auxiliary factory may not all be available in the + :attr:`~iris.common.resolve.Resolve.prepared_category` and therefore this is a legitimate + reason to add the associated metadata of the local dependency to the ``prepared_category``. + + Parameters + ---------- + cube : :class:`~iris.cube.Cube` + The :class:`~iris.cube.Cube` that may contain an auxiliary factory + to be prepared. + category_local : :class:`~iris.common.resolve._CategoryItems` + The :class:`~iris.common.resolve._CategoryItems` of all metadata + local to the provided ``cube``. + from_src : bool, default=True + Boolean stating whether the provided ``cube`` is either a ``src`` or ``tgt`` + :class:`~iris.cube.Cube` - used to retrieve the appropriate metadata from a + :class:`~iris.common.resolve._PreparedMetadata`. 
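In outline, the dependency handling documented above behaves like the following sketch, where ``gather_dependencies`` and ``find_prepared`` are illustrative stand-ins for the two-stage :meth:`_get_prepared_item` lookup:

```python
def gather_dependencies(factory_dependencies, find_prepared):
    """Resolve every named factory dependency, or abandon the factory."""
    dependencies = {}
    for name, coord in factory_dependencies.items():
        prepared = find_prepared(coord.metadata)
        if prepared is None:
            return None  # a missing dependency drops the whole factory
        dependencies[name] = prepared.metadata
    return dependencies
```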
+
+        """
+        for factory in cube.aux_factories:
+            container = type(factory)
+            dependencies = {}
+            prepared_item = None
+            found = True
+
+            if tuple(
+                filter(
+                    lambda item: item.container is container,
+                    self.prepared_factories,
+                )
+            ):
+                # debug: skipping, factory already exists
+                dmsg = (
+                    f"ignoring {'src' if from_src else 'tgt'} {container}, "
+                    f"a similar factory has already been prepared"
+                )
+                logger.debug(dmsg)
+                continue
+
+            for (
+                dependency_name,
+                dependency_coord,
+            ) in factory.dependencies.items():
+                metadata = dependency_coord.metadata
+                prepared_item = self._get_prepared_item(
+                    metadata, category_local, from_src=from_src
+                )
+                if prepared_item is None:
+                    prepared_item = self._get_prepared_item(
+                        metadata,
+                        category_local,
+                        from_src=from_src,
+                        from_local=True,
+                    )
+                    if prepared_item is None:
+                        dmsg = f"cannot find matching {metadata} for {container} dependency {dependency_name}"
+                        logger.debug(dmsg)
+                        found = False
+                        break
+                dependencies[dependency_name] = prepared_item.metadata
+
+            if found and prepared_item is not None:
+                prepared_factory = _PreparedFactory(
+                    container=container, dependencies=dependencies
+                )
+                self.prepared_factories.append(prepared_factory)
+            else:
+                dmsg = f"ignoring {'src' if from_src else 'tgt'} {container}, cannot find all dependencies"
+                logger.debug(dmsg)
+
+    def _prepare_local_payload_aux(self, src_aux_coverage, tgt_aux_coverage):
+        """Consolidate local auxiliary coordinate metadata.
+
+        Populate the ``items_aux`` member of :attr:`~iris.common.resolve.Resolve.prepared_category_items`
+        with a :class:`~iris.common.resolve._PreparedItem` containing the necessary metadata for each
+        ``src`` or ``tgt`` local auxiliary coordinate to be constructed and attached to the resulting
+        resolved :class:`~iris.cube.Cube`.
+
+        .. note::
+
+            In general, lenient behaviour subscribes to the philosophy that
+            it is easier to remove metadata than it is to find and then add
+            metadata. To those ends, lenient behaviour supports metadata
+            richness by adding both local ``src`` and ``tgt`` auxiliary
+            coordinates. Alternatively, strict behaviour will only add a
+            ``tgt`` local auxiliary coordinate that spans dimensions not
+            mapped to by the ``src``, e.g., extra ``tgt`` dimensions.
+
+        Parameters
+        ----------
+        src_aux_coverage :
+            The :class:`~iris.common.resolve.Resolve._AuxCoverage` for the
+            ``src`` :class:`~iris.cube.Cube`.
+        tgt_aux_coverage :
+            The :class:`~iris.common.resolve.Resolve._AuxCoverage` for the
+            ``tgt`` :class:`~iris.cube.Cube`.
+
+        """
+        # Determine whether there are tgt dimensions not mapped to by an
+        # associated src dimension, which may thus be covered by any local
+        # tgt aux coordinates.
+        extra_tgt_dims = set(range(tgt_aux_coverage.cube.ndim)) - set(
+            self.mapping.values()
+        )
+
+        if LENIENT["maths"]:
+            mapped_src_dims = set(self.mapping.keys())
+            mapped_tgt_dims = set(self.mapping.values())
+
+            # Add local src aux coordinates.
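The ``extra_tgt_dims`` computed above marks ``tgt`` dimensions that no ``src`` dimension maps onto, while the lenient branch carries fully mapped local ``src`` aux coordinates across; a sketch with illustrative numbers:

```python
mapping = {0: 1, 1: 2}  # src dim -> tgt dim (illustrative)
tgt_ndim = 4

extra_tgt_dims = set(range(tgt_ndim)) - set(mapping.values())
assert extra_tgt_dims == {0, 3}

# A local src aux coordinate spanning src dims (0, 1) is carried
# over onto the mapped tgt dims (1, 2).
src_coord_dims = (0, 1)
tgt_coord_dims = tuple(mapping[dim] for dim in src_coord_dims)
assert tgt_coord_dims == (1, 2)
```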
+ for item in src_aux_coverage.local_items_aux: + if all([dim in mapped_src_dims for dim in item.dims]): + tgt_dims = tuple([self.mapping[dim] for dim in item.dims]) + prepared_item = self._create_prepared_item( + item.coord, tgt_dims, src_metadata=item.metadata + ) + self.prepared_category.items_aux.append(prepared_item) + else: + dmsg = ( + f"ignoring local src {self._src_cube_position} cube " + f"aux coordinate {item.metadata}, as not all src " + f"dimensions {item.dims} are mapped" + ) + logger.debug(dmsg) + else: + # For strict maths, only local tgt aux coordinates covering + # the extra dimensions of the tgt cube may be added. + mapped_tgt_dims = set() + + # Add local tgt aux coordinates. + for item in tgt_aux_coverage.local_items_aux: + tgt_dims = item.dims + if all([dim in mapped_tgt_dims for dim in tgt_dims]) or any( + [dim in extra_tgt_dims for dim in tgt_dims] + ): + prepared_item = self._create_prepared_item( + item.coord, tgt_dims, tgt_metadata=item.metadata + ) + self.prepared_category.items_aux.append(prepared_item) + else: + dmsg = ( + f"ignoring local tgt {self._tgt_cube_position} cube " + f"aux coordinate {item.metadata}, as not all tgt " + f"dimensions {tgt_dims} are mapped" + ) + logger.debug(dmsg) + + def _prepare_local_payload_dim(self, src_dim_coverage, tgt_dim_coverage): + """Consolidate local dimension coordinate metadata. + + Populate the ``items_dim`` member of :attr:`~iris.common.resolve.Resolve.prepared_category_items` + with a :class:`~iris.common.resolve._PreparedItem` containing the necessary metadata for each + ``src`` or ``tgt`` local :class:`~iris.coords.DimCoord` to be constructed and attached to the + resulting resolved :class:`~iris.cube.Cube`. + + .. note:: + + In general, a local coordinate will only be added if there is no other metadata competing + to describe the same dimension/s on the ``tgt`` :class:`~iris.cube.Cube`. Lenient behaviour + is more liberal, whereas strict behaviour will only add a local ``tgt`` coordinate covering + an unmapped "extra" ``tgt`` dimension/s. + + Parameters + ---------- + src_dim_coverage : + The :class:`~iris.common.resolve.Resolve._DimCoverage` for the + ``src`` :class:`~iris.cube.Cube`. + tgt_dim_coverage : + The :class:`~iris.common.resolve.Resolve._DimCoverage` for the + ``tgt`` :class:`~iris.cube.Cube`. + + """ + mapped_tgt_dims = self.mapping.values() + + # Determine whether there are tgt dimensions not mapped to by an + # associated src dimension, and thus may be covered by any local + # tgt dim coordinates. + extra_tgt_dims = set(range(tgt_dim_coverage.cube.ndim)) - set(mapped_tgt_dims) + + if LENIENT["maths"]: + tgt_dims_conflict = set() + + # Add local src dim coordinates. + for src_dim in src_dim_coverage.dims_local: + tgt_dim = self.mapping[src_dim] + # Only add the local src dim coordinate iff there is no + # associated local tgt dim coordinate. 
+                if tgt_dim not in tgt_dim_coverage.dims_local:
+                    metadata = src_dim_coverage.metadata[src_dim]
+                    coord = src_dim_coverage.coords[src_dim]
+                    prepared_item = self._create_prepared_item(
+                        coord, tgt_dim, src_metadata=metadata
+                    )
+                    self.prepared_category.items_dim.append(prepared_item)
+                else:
+                    tgt_dims_conflict.add(tgt_dim)
+                    if self._debug:
+                        src_metadata = src_dim_coverage.metadata[src_dim]
+                        tgt_metadata = tgt_dim_coverage.metadata[tgt_dim]
+                        dmsg = (
+                            f"ignoring local src {self._src_cube_position} cube "
+                            f"dim coordinate {src_metadata}, as conflicts with "
+                            f"tgt {self._tgt_cube_position} cube dim coordinate "
+                            f"{tgt_metadata}, mapping ({src_dim},)->({tgt_dim},)"
+                        )
+                        logger.debug(dmsg)
+
+            # Determine whether there are any tgt dims free to be mapped
+            # by an available local tgt dim coordinate.
+            tgt_dims_unmapped = set(tgt_dim_coverage.dims_local) - tgt_dims_conflict
+        else:
+            # For strict maths, only local tgt dim coordinates covering
+            # the extra dimensions of the tgt cube may be added.
+            tgt_dims_unmapped = extra_tgt_dims
+
+        # Add local tgt dim coordinates.
+        for tgt_dim in tgt_dims_unmapped:
+            if tgt_dim in mapped_tgt_dims or tgt_dim in extra_tgt_dims:
+                metadata = tgt_dim_coverage.metadata[tgt_dim]
+                if metadata is not None:
+                    coord = tgt_dim_coverage.coords[tgt_dim]
+                    prepared_item = self._create_prepared_item(
+                        coord, tgt_dim, tgt_metadata=metadata
+                    )
+                    self.prepared_category.items_dim.append(prepared_item)
+
+    def _prepare_local_payload_scalar(self, src_aux_coverage, tgt_aux_coverage):
+        """Consolidate local scalar coordinate metadata.
+
+        Populate the ``items_scalar`` member of :attr:`~iris.common.resolve.Resolve.prepared_category_items`
+        with a :class:`~iris.common.resolve._PreparedItem` containing the necessary metadata for each
+        ``src`` or ``tgt`` local scalar coordinate to be constructed and attached to the resulting
+        resolved :class:`~iris.cube.Cube`.
+
+        .. note::
+
+            In general, lenient behaviour subscribes to the philosophy that it is easier to remove
+            metadata than it is to find and then add metadata. To those ends, lenient behaviour supports
+            metadata richness by adding both local ``src`` and ``tgt`` scalar coordinates.
+            Alternatively, strict behaviour will only add a ``tgt`` local scalar coordinate when the
+            ``src`` is a scalar :class:`~iris.cube.Cube` with no local scalar coordinates.
+
+        Parameters
+        ----------
+        src_aux_coverage :
+            The :class:`~iris.common.resolve.Resolve._AuxCoverage` for the
+            ``src`` :class:`~iris.cube.Cube`.
+        tgt_aux_coverage :
+            The :class:`~iris.common.resolve.Resolve._AuxCoverage` for the
+            ``tgt`` :class:`~iris.cube.Cube`.
+
+        """
+        # Add all local tgt scalar coordinates iff the src cube is a
+        # scalar cube with no local src scalar coordinates.
+        # Only for strict maths.
+        src_scalar_cube = (
+            not LENIENT["maths"]
+            and src_aux_coverage.cube.ndim == 0
+            and len(src_aux_coverage.local_items_scalar) == 0
+        )
+
+        if src_scalar_cube or LENIENT["maths"]:
+            # Add any local src scalar coordinates, if available.
+            for item in src_aux_coverage.local_items_scalar:
+                prepared_item = self._create_prepared_item(
+                    item.coord, item.dims, src_metadata=item.metadata
+                )
+                self.prepared_category.items_scalar.append(prepared_item)
+
+            # Add any local tgt scalar coordinates, if available.
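The strict-maths guard above only adopts scalar coordinates when the ``src`` operand is a bare scalar cube; a sketch of the decision, with hypothetical names standing in for the coverage objects:

```python
def adopt_scalars(lenient_maths, src_ndim, n_src_local_scalars):
    # Mirrors the src_scalar_cube test: strict maths requires a
    # zero-dimensional src cube with no local scalar coordinates.
    src_scalar_cube = (
        not lenient_maths and src_ndim == 0 and n_src_local_scalars == 0
    )
    return src_scalar_cube or lenient_maths

assert adopt_scalars(False, 0, 0)      # strict + bare scalar src
assert not adopt_scalars(False, 2, 0)  # strict + mapped src
assert adopt_scalars(True, 2, 3)       # lenient adds both sides
```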
+            for item in tgt_aux_coverage.local_items_scalar:
+                prepared_item = self._create_prepared_item(
+                    item.coord, item.dims, tgt_metadata=item.metadata
+                )
+                self.prepared_category.items_scalar.append(prepared_item)
+
+    def _prepare_local_payload(
+        self,
+        src_dim_coverage,
+        src_aux_coverage,
+        tgt_dim_coverage,
+        tgt_aux_coverage,
+    ):
+        """Consolidate the local metadata.
+
+        Populate the :attr:`~iris.common.resolve.Resolve.prepared_category_items` with a
+        :class:`~iris.common.resolve._PreparedItem` containing the necessary metadata from the ``src``
+        and/or ``tgt`` :class:`~iris.cube.Cube` for each coordinate to be constructed and attached
+        to the resulting resolved :class:`~iris.cube.Cube`.
+
+        Parameters
+        ----------
+        src_dim_coverage :
+            The :class:`~iris.common.resolve.Resolve._DimCoverage` for the
+            ``src`` :class:`~iris.cube.Cube`.
+        src_aux_coverage :
+            The :class:`~iris.common.resolve.Resolve._AuxCoverage` for the
+            ``src`` :class:`~iris.cube.Cube`.
+        tgt_dim_coverage :
+            The :class:`~iris.common.resolve.Resolve._DimCoverage` for the
+            ``tgt`` :class:`~iris.cube.Cube`.
+        tgt_aux_coverage :
+            The :class:`~iris.common.resolve.Resolve._AuxCoverage` for the
+            ``tgt`` :class:`~iris.cube.Cube`.
+
+        """
+        # Add local src/tgt dim coordinates.
+        self._prepare_local_payload_dim(src_dim_coverage, tgt_dim_coverage)
+
+        # Add local src/tgt aux coordinates.
+        self._prepare_local_payload_aux(src_aux_coverage, tgt_aux_coverage)
+
+        # Add local src/tgt scalar coordinates.
+        self._prepare_local_payload_scalar(src_aux_coverage, tgt_aux_coverage)
+
+    def _prepare_points_and_bounds(
+        self, src_coord, tgt_coord, src_dims, tgt_dims, ignore_mismatch=None
+    ):
+        """Consolidate points and bounds.
+
+        Compare the points and bounds of the ``src`` and ``tgt`` coordinates to ensure
+        that they are equivalent, taking into account broadcasting when appropriate.
+
+        .. note::
+
+            An exception will be raised if the ``src`` and ``tgt`` coordinates cannot
+            be broadcast.
+
+        .. note::
+
+            An exception will be raised if either the points or bounds are different;
+            however, appropriate lenient behaviour concessions are applied.
+
+        Parameters
+        ----------
+        src_coord :
+            The ``src`` :class:`~iris.cube.Cube` coordinate with metadata matching
+            the ``tgt_coord``.
+        tgt_coord :
+            The ``tgt`` :class:`~iris.cube.Cube` coordinate with metadata matching
+            the ``src_coord``.
+        src_dims :
+            The dimension/s of the ``src_coord`` attached to the ``src`` :class:`~iris.cube.Cube`.
+        tgt_dims :
+            The dimension/s of the ``tgt_coord`` attached to the ``tgt`` :class:`~iris.cube.Cube`.
+        ignore_mismatch : bool, optional
+            For lenient behaviour only, don't raise an exception if there is a difference between
+            the ``src`` and ``tgt`` coordinate points or bounds.
+            Defaults to ``False``.
+
+        Returns
+        -------
+        A tuple of the equivalent ``points`` and ``bounds``, otherwise ``None``.
+
+        """
+        from iris.util import array_equal
+
+        if ignore_mismatch is None:
+            # Configure ability to ignore coordinate points/bounds
+            # mismatches between common items.
+            ignore_mismatch = False
+
+        points, bounds = None, None
+
+        if not isinstance(src_dims, Iterable):
+            src_dims = (src_dims,)
+
+        if not isinstance(tgt_dims, Iterable):
+            tgt_dims = (tgt_dims,)
+
+        # Deal with coordinates that have been sliced.
+        if src_coord.ndim != tgt_coord.ndim:
+            if tgt_coord.ndim > src_coord.ndim:
+                # Use the tgt coordinate points/bounds.
+                points = tgt_coord.points
+                bounds = tgt_coord.bounds
+            else:
+                # Use the src coordinate points/bounds.
+ points = src_coord.points + bounds = src_coord.bounds + + # Deal with coordinates spanning broadcast dimensions. + if points is None and bounds is None and src_coord.shape != tgt_coord.shape: + # Check whether the src coordinate is broadcasting. + dims = tuple([self.mapping[dim] for dim in src_dims]) + src_shape_broadcast = tuple([self.shape[dim] for dim in dims]) + src_cube_shape = self._src_cube.shape + src_shape = tuple([src_cube_shape[dim] for dim in src_dims]) + src_broadcasting = src_shape != src_shape_broadcast + + # Check whether the tgt coordinate is broadcasting. + tgt_shape_broadcast = tuple([self.shape[dim] for dim in tgt_dims]) + tgt_cube_shape = self._tgt_cube.shape + tgt_shape = tuple([tgt_cube_shape[dim] for dim in tgt_dims]) + tgt_broadcasting = tgt_shape != tgt_shape_broadcast + + if src_broadcasting and tgt_broadcasting: + # TBD: Extend capability to support attempting to broadcast two-way multi-dimensional coordinates. + emsg = ( + f"Cannot broadcast the coordinate {src_coord.name()!r} on " + f"{self._src_cube_position} cube {self._src_cube.name()!r} and " + f"coordinate {tgt_coord.name()!r} on " + f"{self._tgt_cube_position} cube {self._tgt_cube.name()!r} to " + f"broadcast shape {tgt_shape_broadcast}." + ) + raise ValueError(emsg) + elif src_broadcasting: + # Use the tgt coordinate points/bounds. + points = tgt_coord.points + bounds = tgt_coord.bounds + elif tgt_broadcasting: + # Use the src coordinate points/bounds. + points = src_coord.points + bounds = src_coord.bounds + + if points is None and bounds is None: + # Note that, this also ensures shape equality. + eq_points = array_equal(src_coord.points, tgt_coord.points, withnans=True) + if eq_points: + points = src_coord.points + src_has_bounds = src_coord.has_bounds() + tgt_has_bounds = tgt_coord.has_bounds() + + if src_has_bounds and tgt_has_bounds: + src_bounds = src_coord.bounds + eq_bounds = array_equal(src_bounds, tgt_coord.bounds, withnans=True) + + if eq_bounds: + bounds = src_bounds + else: + if LENIENT["maths"] and ignore_mismatch: + # For lenient, ignore coordinate with mis-matched bounds. + dmsg = ( + f"ignoring src {self._src_cube_position} cube " + f"{src_coord.metadata}, unequal bounds with " + f"tgt {self._tgt_cube_position} cube, " + f"{src_dims}->{tgt_dims}" + ) + logger.debug(dmsg) + else: + emsg = ( + f"Coordinate {src_coord.name()!r} has different bounds for the " + f"LHS cube {self.lhs_cube.name()!r} and " + f"RHS cube {self.rhs_cube.name()!r}." + ) + raise ValueError(emsg) + else: + # For lenient, use either of the coordinate bounds, if they exist. + if LENIENT["maths"]: + if src_has_bounds: + dmsg = ( + f"using src {self._src_cube_position} cube " + f"{src_coord.metadata} bounds, tgt has no bounds" + ) + logger.debug(dmsg) + bounds = src_coord.bounds + else: + dmsg = ( + f"using tgt {self._tgt_cube_position} cube " + f"{tgt_coord.metadata} bounds, src has no bounds" + ) + logger.debug(dmsg) + bounds = tgt_coord.bounds + else: + # For strict, both coordinates must have bounds, or both + # coordinates must not have bounds. + if src_has_bounds: + emsg = ( + f"Coordinate {src_coord.name()!r} has bounds for the " + f"{self._src_cube_position} cube {self._src_cube.name()!r}, " + f"but not the {self._tgt_cube_position} cube {self._tgt_cube.name()!r}." 
+ ) + raise ValueError(emsg) + if tgt_has_bounds: + emsg = ( + f"Coordinate {tgt_coord.name()!r} has bounds for the " + f"{self._tgt_cube_position} cube {self._tgt_cube.name()!r}, " + f"but not the {self._src_cube_position} cube {self._src_cube.name()!r}." + ) + raise ValueError(emsg) + else: + if LENIENT["maths"] and ignore_mismatch: + # For lenient, ignore coordinate with mis-matched points. + dmsg = ( + f"ignoring src {self._src_cube_position} cube " + f"{src_coord.metadata}, unequal points with tgt " + f"{src_dims}->{tgt_dims}" + ) + logger.debug(dmsg) + else: + emsg = ( + f"Coordinate {src_coord.name()!r} has different points for the " + f"LHS cube {self.lhs_cube.name()!r} and " + f"RHS cube {self.rhs_cube.name()!r}." + ) + raise ValueError(emsg) + + return points, bounds + + @property + def _src_cube(self): + assert self.map_rhs_to_lhs is not None + if self.map_rhs_to_lhs: + result = self.rhs_cube + else: + result = self.lhs_cube + return result + + @property + def _src_cube_position(self): + assert self.map_rhs_to_lhs is not None + if self.map_rhs_to_lhs: + result = "RHS" + else: + result = "LHS" + return result + + @property + def _src_cube_resolved(self): + assert self.map_rhs_to_lhs is not None + if self.map_rhs_to_lhs: + result = self.rhs_cube_resolved + else: + result = self.lhs_cube_resolved + return result + + @_src_cube_resolved.setter + def _src_cube_resolved(self, cube): + assert self.map_rhs_to_lhs is not None + if self.map_rhs_to_lhs: + self.rhs_cube_resolved = cube + else: + self.lhs_cube_resolved = cube + + @property + def _tgt_cube(self): + assert self.map_rhs_to_lhs is not None + if self.map_rhs_to_lhs: + result = self.lhs_cube + else: + result = self.rhs_cube + return result + + @property + def _tgt_cube_position(self): + assert self.map_rhs_to_lhs is not None + if self.map_rhs_to_lhs: + result = "LHS" + else: + result = "RHS" + return result + + @property + def _tgt_cube_resolved(self): + assert self.map_rhs_to_lhs is not None + if self.map_rhs_to_lhs: + result = self.lhs_cube_resolved + else: + result = self.rhs_cube_resolved + return result + + @_tgt_cube_resolved.setter + def _tgt_cube_resolved(self, cube): + assert self.map_rhs_to_lhs is not None + if self.map_rhs_to_lhs: + self.lhs_cube_resolved = cube + else: + self.rhs_cube_resolved = cube + + def _tgt_cube_prepare(self, data): + cube = self._tgt_cube + + # Replace existing tgt cube data with the provided data. + cube.data = data + + # Clear the aux factories. + for factory in cube.aux_factories: + cube.remove_aux_factory(factory) + + # Clear the cube coordinates. + for coord in cube.coords(): + cube.remove_coord(coord) + + # Clear the cube cell measures. + for cm in cube.cell_measures(): + cube.remove_cell_measure(cm) + + # Clear the ancillary variables. + for av in cube.ancillary_variables(): + cube.remove_ancillary_variable(av) + + def cube(self, data, in_place=False): + """Create the resultant resolved cube. + + Create the resultant :class:`~iris.cube.Cube` from the resolved ``lhs`` + and ``rhs`` :class:`~iris.cube.Cube` operands, using the provided + ``data``. + + Parameters + ---------- + data : + The data payload for the resultant :class:`~iris.cube.Cube`, which + **must match** the expected resolved + :attr:`~iris.common.resolve.Resolve.shape`. + in_place : bool, default=False + If ``True``, the ``data`` is inserted into the ``tgt`` + :class:`~iris.cube.Cube`. 
The existing metadata of the ``tgt`` + :class:`~iris.cube.Cube` is replaced with the resolved metadata from + the ``lhs`` and ``rhs`` :class:`~iris.cube.Cube` operands. Otherwise, + a **new** :class:`~iris.cube.Cube` instance is returned. + Default is ``False``. + + Returns + ------- + :class:`~iris.cube.Cube` + + Notes + ----- + .. note:: + + :class:`~iris.common.resolve.Resolve` will determine whether the + ``lhs`` :class:`~iris.cube.Cube` operand is mapped to the + ``rhs`` :class:`~iris.cube.Cube` operand, or vice versa. + In general, the **lower rank** operand (``src``) is mapped to the + **higher rank** operand (``tgt``). Therefore, the ``src`` + :class:`~iris.cube.Cube` may be either the ``lhs`` or the ``rhs`` + :class:`~iris.cube.Cube` operand, given the direction of the + mapping. See :attr:`~iris.common.resolve.Resolve.map_rhs_to_lhs`. + + .. warning:: + + It may not be possible to perform an ``in_place`` operation, + due to any transposition or extended broadcasting that requires + to be performed i.e., the ``tgt`` :class:`~iris.cube.Cube` **must + match** the expected resolved + :attr:`~iris.common.resolve.Resolve.shape`. + + Examples + -------- + .. testsetup:: in-place + + import iris + import numpy as np + from iris.common import Resolve + cube1 = iris.load_cube(iris.sample_data_path("A1B_north_america.nc")) + cube2 = iris.load_cube(iris.sample_data_path("E1_north_america.nc"))[0] + cube2.transpose() + zeros = np.zeros(cube1.shape, dtype=cube1.dtype) + + .. doctest:: in-place + + >>> resolver = Resolve(cube1, cube2) + >>> resolver.map_rhs_to_lhs + True + >>> cube1.data.sum() + 124652160.0 + >>> zeros.shape + (240, 37, 49) + >>> zeros.sum() + 0.0 + >>> result = resolver.cube(zeros, in_place=True) + >>> result is cube1 + True + >>> cube1.data.sum() + 0.0 + + """ + from iris.cube import Cube + + expected_shape = self.shape + + # Ensure that we have been provided with candidate cubes, which are + # now resolved and metadata is prepared, ready and awaiting the + # resultant resolved cube. + if expected_shape is None: + emsg = ( + "Cannot resolve resultant cube, as no candidate cubes have " + "been provided." + ) + raise ValueError(emsg) + + if not hasattr(data, "shape"): + data = np.asanyarray(data) + + # Ensure that the shape of the provided data is the expected + # shape of the resultant resolved cube. + if data.shape != expected_shape: + emsg = ( + "Cannot resolve resultant cube, as the provided data must " + f"have shape {expected_shape}, got data shape {data.shape}." + ) + raise ValueError(emsg) + + if in_place: + result = self._tgt_cube + + if result.shape != expected_shape: + emsg = ( + "Cannot resolve resultant cube in-place, as the " + f"{self._tgt_cube_position} tgt cube {result.name()!r} " + f"requires data with shape {result.shape}, got data " + f"shape {data.shape}. Suggest not performing this " + "operation in-place." + ) + raise ValueError(emsg) + + # Prepare target cube for in-place population with the prepared + # metadata content and the provided data. + self._tgt_cube_prepare(data) + else: + # Create the resultant resolved cube with provided data. + result = Cube(data) + + # Add the combined cube metadata from both the candidate cubes. + result.metadata = self.lhs_cube.metadata.combine(self.rhs_cube.metadata) + + # Add the prepared dim coordinates. + for item in self.prepared_category.items_dim: + coord = item.create_coord(metadata=item.metadata.combined) + result.add_dim_coord(coord, item.dims) + + # Add the prepared aux and scalar coordinates. 
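For comparison with the in-place doctest above, the default call returns a brand new cube; a sketch reusing the same ``cube1``/``cube2``/``zeros`` setup:

```python
resolver = Resolve(cube1, cube2)

# Not in-place: a new cube is created from the data payload ...
result = resolver.cube(zeros)
assert result is not cube1

# ... carrying the resolved broadcast shape and combined metadata.
assert result.shape == resolver.shape == (240, 37, 49)
```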
+ prepared_aux_coords = ( + self.prepared_category.items_aux + self.prepared_category.items_scalar + ) + for item in prepared_aux_coords: + # These items are "special" + coord = item.create_coord(metadata=item.metadata.combined) + try: + result.add_aux_coord(coord, item.dims) + except ValueError as err: + scalar = dims = "" + if item.dims: + plural = "s" if len(item.dims) > 1 else "" + dims = f" with tgt dim{plural} {item.dims}" + else: + scalar = "scalar " + dmsg = ( + f"ignoring prepared {scalar}coordinate " + f"{coord.metadata}{dims}, got {err!r}" + ) + logger.debug(dmsg) + + # Add the prepared aux factories. + for prepared_factory in self.prepared_factories: + dependencies = dict() + for ( + dependency_name, + prepared_metadata, + ) in prepared_factory.dependencies.items(): + coord = result.coord(prepared_metadata.combined) + dependencies[dependency_name] = coord + factory = prepared_factory.container(**dependencies) + result.add_aux_factory(factory) + + return result + + @property + def mapped(self): + """Whether all ``src`` dimensions have been mapped. + + Boolean state representing whether **all** ``src`` :class:`~iris.cube.Cube` + dimensions have been associated with relevant ``tgt`` + :class:`~iris.cube.Cube` dimensions. + + .. note:: + + :class:`~iris.common.resolve.Resolve` will determine whether the + ``lhs`` :class:`~iris.cube.Cube` operand is mapped to the + ``rhs`` :class:`~iris.cube.Cube` operand, or vice versa. + In general, the **lower rank** operand (``src``) is mapped to the + **higher rank** operand (``tgt``). Therefore, the ``src`` + :class:`~iris.cube.Cube` may be either the ``lhs`` or the ``rhs`` + :class:`~iris.cube.Cube` operand, given the direction of the + mapping. See :attr:`~iris.common.resolve.Resolve.map_rhs_to_lhs`. + + If no :class:`~iris.cube.Cube` operands have been provided, then + ``mapped`` is ``None``. + + For example: + + .. doctest:: + + >>> print(cube1) + air_temperature / (K) (time: 240; latitude: 37; longitude: 49) + Dimension coordinates: + time x - - + latitude - x - + longitude - - x + Auxiliary coordinates: + forecast_period x - - + Scalar coordinates: + forecast_reference_time 1859-09-01 06:00:00 + height 1.5 m + Cell methods: + 0 time: mean (interval: 6 hour) + Attributes: + Conventions 'CF-1.5' + Model scenario 'A1B' + STASH m01s03i236 + source 'Data from Met Office Unified Model 6.05' + >>> print(cube2) + air_temperature / (K) (longitude: 49; latitude: 37) + Dimension coordinates: + longitude x - + latitude - x + Scalar coordinates: + forecast_period 10794 hours + forecast_reference_time 1859-09-01 06:00:00 + height 1.5 m + time 1860-06-01 00:00:00, bound=(1859-12-01 00:00:00, 1860-12-01 00:00:00) + Cell methods: + 0 time: mean (interval: 6 hour) + Attributes: + Conventions 'CF-1.5' + Model scenario 'E1' + STASH m01s03i236 + source 'Data from Met Office Unified Model 6.05' + >>> Resolve().mapped is None + True + >>> resolver = Resolve(cube1, cube2) + >>> resolver.mapped + True + >>> resolver.map_rhs_to_lhs + True + >>> resolver = Resolve(cube2, cube1) + >>> resolver.mapped + True + >>> resolver.map_rhs_to_lhs + False + + """ # noqa: D214, D406, D407, D410, D411 + result = None + if self.mapping is not None: + result = self._src_cube.ndim == len(self.mapping) + return result + + @property + def shape(self): + """Proposed shape of the final resolved cube. + + Proposed shape of the final resolved cube given the ``lhs`` + :class:`~iris.cube.Cube` operand and the ``rhs`` :class:`~iris.cube.Cube` + operand. 
+ + If no :class:`~iris.cube.Cube` operands have been provided, then + ``shape`` is ``None``. + + For example: + + .. doctest:: + + >>> print(cube1) + air_temperature / (K) (time: 240; latitude: 37; longitude: 49) + Dimension coordinates: + time x - - + latitude - x - + longitude - - x + Auxiliary coordinates: + forecast_period x - - + Scalar coordinates: + forecast_reference_time 1859-09-01 06:00:00 + height 1.5 m + Cell methods: + 0 time: mean (interval: 6 hour) + Attributes: + Conventions 'CF-1.5' + Model scenario 'A1B' + STASH m01s03i236 + source 'Data from Met Office Unified Model 6.05' + >>> print(cube2) + air_temperature / (K) (longitude: 49; latitude: 37) + Dimension coordinates: + longitude x - + latitude - x + Scalar coordinates: + forecast_period 10794 hours + forecast_reference_time 1859-09-01 06:00:00 + height 1.5 m + time 1860-06-01 00:00:00, bound=(1859-12-01 00:00:00, 1860-12-01 00:00:00) + Cell methods: + 0 time: mean (interval: 6 hour) + Attributes: + Conventions 'CF-1.5' + Model scenario 'E1' + STASH m01s03i236 + source 'Data from Met Office Unified Model 6.05' + >>> Resolve().shape is None + True + >>> Resolve(cube1, cube2).shape + (240, 37, 49) + >>> Resolve(cube2, cube1).shape + (240, 37, 49) + + """ # noqa: D214, D406, D407, D410, D411 + return self._broadcast_shape diff --git a/lib/iris/config.py b/lib/iris/config.py index f375128a14..9cec602a95 100644 --- a/lib/iris/config.py +++ b/lib/iris/config.py @@ -1,25 +1,12 @@ -# (C) British Crown Copyright 2010 - 2018, Met Office +# Copyright Iris contributors # -# This file is part of Iris. -# -# Iris is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the -# Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Iris is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with Iris. If not, see . -""" -Provides access to Iris-specific configuration values. +# This file is part of Iris and is released under the BSD license. +# See LICENSE in the root of the repository for full licensing details. +"""Provides access to Iris-specific configuration values. The default configuration values can be overridden by creating the file ``iris/etc/site.cfg``. If it exists, this file must conform to the format -defined by :mod:`ConfigParser`. +defined by :mod:`configparser`. ---------- @@ -34,27 +21,93 @@ The full path to the Iris palette configuration directory -.. py:data:: iris.config.IMPORT_LOGGER - - The [optional] name of the logger to notify when first imported. - ---------- -""" -from __future__ import (absolute_import, division, print_function) -from six.moves import (filter, input, map, range, zip) # noqa -import six -from six.moves import configparser +""" +import configparser import contextlib +import logging import os.path -import sys import warnings +import iris.warnings + + +def get_logger(name, datefmt=None, fmt=None, level=None, propagate=None, handler=True): + """Create a custom class for logging. + + Create a :class:`logging.Logger` with a :class:`logging.StreamHandler` + and custom :class:`logging.Formatter`. + + Parameters + ---------- + name : + The name of the logger. 
Typically this is the module filename that + owns the logger. + datefmt : optional + The date format string of the :class:`logging.Formatter`. + Defaults to ``%d-%m-%Y %H:%M:%S``. + fmt : optional + The additional format string of the :class:`logging.Formatter`. + This is appended to the default format string + ``%(asctime)s %(name)s %(levelname)s - %(message)s``. + level : optional + The threshold level of the logger. Defaults to ``INFO``. + propagate : optional + Sets the ``propagate`` attribute of the :class:`logging.Logger`, + which determines whether events logged to this logger will be + passed to the handlers of higher level loggers. Defaults to + ``False``. + handler : bool, default=True + Create and attach a :class:`logging.StreamHandler` to the + logger. Defaults to ``True``. + + Returns + ------- + :class:`logging.Logger`. + + """ + if level is None: + # Default logging level. + level = "INFO" + + if propagate is None: + # Default logging propagate behaviour. + propagate = False + + # Create the named logger. + logger = logging.getLogger(name) + logger.setLevel(level) + logger.propagate = propagate + + # Create and add the handler to the logger, if required. + if handler: + if datefmt is None: + # Default date format string. + datefmt = "%d-%m-%Y %H:%M:%S" + + # Default format string. + _fmt = "%(asctime)s %(name)s %(levelname)s - %(message)s" + # Append additional format string, if appropriate. + fmt = _fmt if fmt is None else f"{_fmt} {fmt}" + + # Create a formatter. + formatter = logging.Formatter(fmt=fmt, datefmt=datefmt) + + # Create a logging handler. + handler = logging.StreamHandler() + handler.setFormatter(formatter) + + logger.addHandler(handler) + + return logger + # Returns simple string options def get_option(section, option, default=None): - """ + """Return the option value for the given section. + Returns the option value for the given section, or the default value if the section/option is not present. @@ -67,7 +120,8 @@ def get_option(section, option, default=None): # Returns directory path options def get_dir_option(section, option, default=None): - """ + """Return the directory path from the given option and section. + Returns the directory path from the given option and section, or returns the given default value if the section/option is not present or does not represent a valid directory. @@ -79,9 +133,14 @@ def get_dir_option(section, option, default=None): if os.path.isdir(c_path): path = c_path else: - msg = 'Ignoring config item {!r}:{!r} (section:option) as {!r}' \ - ' is not a valid directory path.' - warnings.warn(msg.format(section, option, c_path)) + msg = ( + "Ignoring config item {!r}:{!r} (section:option) as {!r}" + " is not a valid directory path." + ) + warnings.warn( + msg.format(section, option, c_path), + category=iris.warnings.IrisIgnoringWarning, + ) return path @@ -89,61 +148,57 @@ def get_dir_option(section, option, default=None): ROOT_PATH = os.path.abspath(os.path.dirname(__file__)) # The full path to the configuration directory of the active Iris instance. -CONFIG_PATH = os.path.join(ROOT_PATH, 'etc') +CONFIG_PATH = os.path.join(ROOT_PATH, "etc") # Load the optional "site.cfg" file if it exists. 
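The ``get_logger`` factory above is intended for module-level use; a brief usage sketch, in which the logger name and the extra format string are arbitrary choices:

```python
from iris.config import get_logger

# The extra fmt is appended to the default format string
# "%(asctime)s %(name)s %(levelname)s - %(message)s".
logger = get_logger(__name__, fmt="[%(funcName)s]")
logger.info("metadata resolve phase complete")  # default level is INFO
```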
-if sys.version_info >= (3, 2): - config = configparser.ConfigParser() -else: - config = configparser.SafeConfigParser() -config.read([os.path.join(CONFIG_PATH, 'site.cfg')]) - +config = configparser.ConfigParser() +config.read([os.path.join(CONFIG_PATH, "site.cfg")]) ################## # Resource options -_RESOURCE_SECTION = 'Resources' +_RESOURCE_SECTION = "Resources" -TEST_DATA_DIR = get_dir_option(_RESOURCE_SECTION, 'test_data_dir', - default=os.path.join(os.path.dirname(__file__), - 'test_data')) +TEST_DATA_DIR = get_dir_option( + _RESOURCE_SECTION, + "test_data_dir", + default=os.path.join(os.path.dirname(__file__), "test_data"), +) # Override the data repository if the appropriate environment variable -# has been set. This is used in setup.py in the TestRunner command to -# enable us to simulate the absence of external data. +# has been set. override = os.environ.get("OVERRIDE_TEST_DATA_REPOSITORY") if override: TEST_DATA_DIR = None if os.path.isdir(os.path.expanduser(override)): TEST_DATA_DIR = os.path.abspath(override) -PALETTE_PATH = get_dir_option(_RESOURCE_SECTION, 'palette_path', - os.path.join(CONFIG_PATH, 'palette')) +PALETTE_PATH = get_dir_option( + _RESOURCE_SECTION, "palette_path", os.path.join(CONFIG_PATH, "palette") +) # Runtime options -class NetCDF(object): +class NetCDF: """Control Iris NetCDF options.""" def __init__(self, conventions_override=None): - """ - Set up NetCDF processing options for Iris. + """Set up NetCDF processing options for Iris. - Currently accepted kwargs: - - * conventions_override (bool): + Parameters + ---------- + conventions_override : bool, optional Define whether the CF Conventions version (e.g. `CF-1.6`) set when saving a cube to a NetCDF file should be defined by - Iris (the default) or the cube being saved. - - If `False` (the default), specifies that Iris should set the + Iris (the default) or the cube being saved. If `False` + (the default), specifies that Iris should set the CF Conventions version when saving cubes as NetCDF files. If `True`, specifies that the cubes being saved to NetCDF should set the CF Conventions version for the saved NetCDF files. - Example usages: - + Examples + -------- * Specify, for the lifetime of the session, that we want all cubes written to NetCDF to define their own CF Conventions versions:: @@ -159,50 +214,57 @@ def __init__(self, conventions_override=None): """ # Define allowed `__dict__` keys first. - self.__dict__['conventions_override'] = None + self.__dict__["conventions_override"] = None # Now set specific values. - setattr(self, 'conventions_override', conventions_override) + setattr(self, "conventions_override", conventions_override) def __repr__(self): - msg = 'NetCDF options: {}.' + msg = "NetCDF options: {}." # Automatically populate with all currently accepted kwargs. - options = ['{}={}'.format(k, v) - for k, v in six.iteritems(self.__dict__)] - joined = ', '.join(options) + options = ["{}={}".format(k, v) for k, v in self.__dict__.items()] + joined = ", ".join(options) return msg.format(joined) def __setattr__(self, name, value): if name not in self.__dict__: # Can't add new names. - msg = 'Cannot set option {!r} for {} configuration.' + msg = "Cannot set option {!r} for {} configuration." raise AttributeError(msg.format(name, self.__class__.__name__)) if value is None: # Set an unset value to the name's default. 
- value = self._defaults_dict[name]['default'] - if self._defaults_dict[name]['options'] is not None: + value = self._defaults_dict[name]["default"] + if self._defaults_dict[name]["options"] is not None: # Replace a bad value with a good one if there is a defined set of # specified good values. If there isn't, we can assume that # anything goes. - if value not in self._defaults_dict[name]['options']: - good_value = self._defaults_dict[name]['default'] - wmsg = ('Attempting to set invalid value {!r} for ' - 'attribute {!r}. Defaulting to {!r}.') - warnings.warn(wmsg.format(value, name, good_value)) + if value not in self._defaults_dict[name]["options"]: + good_value = self._defaults_dict[name]["default"] + wmsg = ( + "Attempting to set invalid value {!r} for " + "attribute {!r}. Defaulting to {!r}." + ) + warnings.warn( + wmsg.format(value, name, good_value), + category=iris.warnings.IrisDefaultingWarning, + ) value = good_value self.__dict__[name] = value @property def _defaults_dict(self): # Set this as a property so that it isn't added to `self.__dict__`. - return {'conventions_override': {'default': False, - 'options': [True, False]}, - } + return { + "conventions_override": { + "default": False, + "options": [True, False], + }, + } @contextlib.contextmanager def context(self, **kwargs): - """ - Allow temporary modification of the options via a context manager. + """Allow temporary modification of the options via a context manager. + Accepted kwargs are the same as can be supplied to the Option. """ @@ -210,7 +272,7 @@ def context(self, **kwargs): # contextmanager block. starting_state = self.__dict__.copy() # Update the state to reflect the requested changes. - for name, value in six.iteritems(kwargs): + for name, value in kwargs.items(): setattr(self, name, value) try: yield diff --git a/lib/iris/coord_categorisation.py b/lib/iris/coord_categorisation.py index 3c3303441d..12ad93a9c3 100644 --- a/lib/iris/coord_categorisation.py +++ b/lib/iris/coord_categorisation.py @@ -1,100 +1,97 @@ -# (C) British Crown Copyright 2010 - 2018, Met Office +# Copyright Iris contributors # -# This file is part of Iris. -# -# Iris is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the -# Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Iris is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with Iris. If not, see . -""" -Cube functions for coordinate categorisation. +# This file is part of Iris and is released under the BSD license. +# See LICENSE in the root of the repository for full licensing details. +"""Cube functions for coordinate categorisation. All the functions provided here add a new coordinate to a cube. - * The function :func:`add_categorised_coord` performs a generic - coordinate categorisation. - * The other functions all implement specific common cases - (e.g. :func:`add_day_of_month`). - Currently, these are all calendar functions, so they only apply to - "Time coordinates". -""" +* The function :func:`add_categorised_coord` performs a generic + coordinate categorisation. +* The other functions all implement specific common cases + (e.g. :func:`add_day_of_month`). 
+ Currently, these are all calendar functions, so they only apply to + "Time coordinates". -from __future__ import (absolute_import, division, print_function) -from six.moves import (filter, input, map, range, zip) # noqa -import six +""" import calendar import collections +import inspect +from typing import Callable +import cftime import numpy as np import iris.coords +import iris.cube -def add_categorised_coord(cube, name, from_coord, category_function, - units='1'): - """ - Add a new coordinate to a cube, by categorising an existing one. +def add_categorised_coord( + cube: iris.cube.Cube, + name: str, + from_coord: iris.coords.DimCoord | iris.coords.AuxCoord | str, + category_function: Callable, + units: str = "1", +) -> None: + """Add a new coordinate to a cube, by categorising an existing one. Make a new :class:`iris.coords.AuxCoord` from mapped values, and add it to the cube. - Args: - - * cube (:class:`iris.cube.Cube`): - the cube containing 'from_coord'. The new coord will be added into it. - * name (string): - name of the created coordinate - * from_coord (:class:`iris.coords.Coord` or string): - coordinate in 'cube', or the name of one - * category_function (callable): - function(coordinate, value), returning a category value for a - coordinate point-value - - Kwargs: - - * units: - units of the category value, typically 'no_unit' or '1'. + Parameters + ---------- + cube : + The cube containing 'from_coord'. The new coord will be added into it. + name : + Name of the created coordinate. + from_coord : + Coordinate in 'cube', or the name of one. + category_function : + Function(coordinate, value), returning a category value for a coordinate + point-value. If ``value`` has a type hint :obj:`cftime.datetime`, the + coordinate points are translated to :obj:`cftime.datetime` s before + calling ``category_function``. + units : + Units of the category value, typically 'no_unit' or '1'. """ # Interpret coord, if given as a name - if isinstance(from_coord, six.string_types): - from_coord = cube.coord(from_coord) + coord = cube.coord(from_coord) if isinstance(from_coord, str) else from_coord if len(cube.coords(name)) > 0: msg = 'A coordinate "%s" already exists in the cube.' % name raise ValueError(msg) + # Translate the coordinate points to cftime datetimes if requested. + value_param = list(inspect.signature(category_function).parameters.values())[1] + if issubclass(value_param.annotation, cftime.datetime): + points = coord.units.num2date(coord.points, only_use_cftime_datetimes=True) + else: + points = coord.points + # Construct new coordinate by mapping values, using numpy.vectorize to # support multi-dimensional coords. # Test whether the result contains strings. If it does we must manually # force the dtype because of a numpy bug (see numpy #3270 on GitHub). - result = category_function(from_coord, from_coord.points.ravel()[0]) - if isinstance(result, six.string_types): + result = category_function(coord, points.ravel()[0]) + if isinstance(result, str): str_vectorised_fn = np.vectorize(category_function, otypes=[object]) - # Use a common type for string arrays (N.B. limited to 64 chars) - all_cases_string_type = '|S64' if six.PY2 else '|U64' def vectorised_fn(*args): - return str_vectorised_fn(*args).astype(all_cases_string_type) + # Use a common type for string arrays (N.B. limited to 64 chars). 
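Because of the signature inspection above, a category function opts in to receiving datetimes simply by annotating its second parameter; a sketch, where ``am_or_pm`` is a hypothetical helper and the sample file is the one used elsewhere in these docstrings:

```python
import cftime
import iris
from iris.coord_categorisation import add_categorised_coord

def am_or_pm(coord, value: cftime.datetime) -> str:
    # The cftime.datetime annotation means the time points are
    # converted to datetimes before this function is called.
    return "am" if value.hour < 12 else "pm"

cube = iris.load_cube(iris.sample_data_path("A1B_north_america.nc"))
add_categorised_coord(cube, "am_or_pm", "time", am_or_pm, units="no_unit")
print(cube.coord("am_or_pm").points[:4])
```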
+ return str_vectorised_fn(*args).astype("|U64") else: vectorised_fn = np.vectorize(category_function) - new_coord = iris.coords.AuxCoord(vectorised_fn(from_coord, - from_coord.points), - units=units, - attributes=from_coord.attributes.copy()) + new_coord = iris.coords.AuxCoord( + vectorised_fn(coord, points), + units=units, + attributes=coord.attributes.copy(), + ) new_coord.rename(name) # Add into the cube - cube.add_aux_coord(new_coord, cube.coord_dims(from_coord)) + cube.add_aux_coord(new_coord, cube.coord_dims(coord)) # ====================================== @@ -104,134 +101,119 @@ def vectorised_fn(*args): # coordinates only # -# Private "helper" function -def _pt_date(coord, time): - """ - Return the date of a time-coordinate point. +# -------------------------------------------- +# Time categorisations : calendar date components - Args: - * coord (Coord): - coordinate (must be Time-type) - * time (float): - value of a coordinate point +def add_year(cube, coord, name="year"): + """Add a categorical calendar-year coordinate.""" - Returns: - datetime.date - """ - # NOTE: All of the currently defined categorisation functions are - # calendar operations on Time coordinates. - # - All these currently depend on Unit::num2date, which is deprecated (!!) - # - We will want to do better, when we sort out our own Calendars. - # - For now, just make sure these all call through this one function. - return coord.units.num2date(time) + def get_year(_, value: cftime.datetime) -> int: + return value.year + add_categorised_coord(cube, name, coord, get_year) -# -------------------------------------------- -# Time categorisations : calendar date components -def add_year(cube, coord, name='year'): - """Add a categorical calendar-year coordinate.""" - add_categorised_coord( - cube, name, coord, - lambda coord, x: _pt_date(coord, x).year) +def add_month_number(cube, coord, name="month_number"): + """Add a categorical month coordinate, values 1..12.""" + def get_month_number(_, value: cftime.datetime) -> int: + return value.month -def add_month_number(cube, coord, name='month_number'): - """Add a categorical month coordinate, values 1..12.""" - add_categorised_coord( - cube, name, coord, - lambda coord, x: _pt_date(coord, x).month) + add_categorised_coord(cube, name, coord, get_month_number) -def add_month_fullname(cube, coord, name='month_fullname'): +def add_month_fullname(cube, coord, name="month_fullname"): """Add a categorical month coordinate, values 'January'..'December'.""" - add_categorised_coord( - cube, name, coord, - lambda coord, x: calendar.month_name[_pt_date(coord, x).month], - units='no_unit') + def get_month_fullname(_, value: cftime.datetime) -> str: + return calendar.month_name[value.month] + + add_categorised_coord(cube, name, coord, get_month_fullname, units="no_unit") -def add_month(cube, coord, name='month'): + +def add_month(cube, coord, name="month"): """Add a categorical month coordinate, values 'Jan'..'Dec'.""" - add_categorised_coord( - cube, name, coord, - lambda coord, x: calendar.month_abbr[_pt_date(coord, x).month], - units='no_unit') + def get_month_abbr(_, value: cftime.datetime) -> str: + return calendar.month_abbr[value.month] + + add_categorised_coord(cube, name, coord, get_month_abbr, units="no_unit") -def add_day_of_month(cube, coord, name='day_of_month'): + +def add_day_of_month(cube, coord, name="day_of_month"): """Add a categorical day-of-month coordinate, values 1..31.""" - add_categorised_coord( - cube, name, coord, - lambda coord, x: _pt_date(coord, x).day) + 
def get_day_of_month(_, value: cftime.datetime) -> int: + return value.day -def add_day_of_year(cube, coord, name='day_of_year'): - """ - Add a categorical day-of-year coordinate, values 1..365 - (1..366 in leap years). + add_categorised_coord(cube, name, coord, get_day_of_month) - """ - # Note: cftime.datetime objects return a normal tuple from timetuple(), - # unlike datetime.datetime objects that return a namedtuple. - # Index the time tuple (element 7 is day of year) instead of using named - # element tm_yday. - add_categorised_coord( - cube, name, coord, - lambda coord, x: _pt_date(coord, x).timetuple()[7]) + +def add_day_of_year(cube, coord, name="day_of_year"): + """Add a categorical day-of-year coordinate, values 1..365 (1..366 in leap years).""" + + def get_day_of_year(_, value: cftime.datetime) -> int: + return value.timetuple().tm_yday + + add_categorised_coord(cube, name, coord, get_day_of_year) # -------------------------------------------- # Time categorisations : days of the week -def add_weekday_number(cube, coord, name='weekday_number'): + +def add_weekday_number(cube, coord, name="weekday_number"): """Add a categorical weekday coordinate, values 0..6 [0=Monday].""" - add_categorised_coord( - cube, name, coord, - lambda coord, x: _pt_date(coord, x).weekday()) + def get_weekday_number(_, value: cftime.datetime) -> int: + return value.dayofwk + + add_categorised_coord(cube, name, coord, get_weekday_number) -def add_weekday_fullname(cube, coord, name='weekday_fullname'): + +def add_weekday_fullname(cube, coord, name="weekday_fullname"): """Add a categorical weekday coordinate, values 'Monday'..'Sunday'.""" - add_categorised_coord( - cube, name, coord, - lambda coord, x: calendar.day_name[_pt_date(coord, x).weekday()], - units='no_unit') + def get_weekday_fullname(_, value: cftime.datetime) -> str: + return calendar.day_name[value.dayofwk] + + add_categorised_coord(cube, name, coord, get_weekday_fullname, units="no_unit") -def add_weekday(cube, coord, name='weekday'): + +def add_weekday(cube, coord, name="weekday"): """Add a categorical weekday coordinate, values 'Mon'..'Sun'.""" - add_categorised_coord( - cube, name, coord, - lambda coord, x: calendar.day_abbr[_pt_date(coord, x).weekday()], - units='no_unit') + + def get_weekday(_, value: cftime.datetime) -> str: + return calendar.day_abbr[value.dayofwk] + + add_categorised_coord(cube, name, coord, get_weekday, units="no_unit") # -------------------------------------------- # Time categorisations : hour of the day -def add_hour(cube, coord, name='hour'): + +def add_hour(cube, coord, name="hour"): """Add a categorical hour coordinate, values 0..23.""" - add_categorised_coord( - cube, name, coord, - lambda coord, x: _pt_date(coord, x).hour) + + def get_hour(_, value: cftime.datetime) -> int: + return value.hour + + add_categorised_coord(cube, name, coord, get_hour) # ---------------------------------------------- # Time categorisations : meteorological seasons -def _months_in_season(season): - """ - Returns a list of month numbers corresponding to each month in the - given season. - """ - cyclic_months = 'jfmamjjasondjfmamjjasond' +def _months_in_season(season): + """Return a list of month numbers corresponding to each month in the given season.""" + cyclic_months = "jfmamjjasondjfmamjjasond" m0 = cyclic_months.find(season.lower()) if m0 < 0: # Can't match the season, raise an error. 
- raise ValueError('unrecognised season: {!s}'.format(season)) + raise ValueError("unrecognised season: {!s}".format(season)) m1 = m0 + len(season) return [(month % 12) + 1 for month in range(m0, m1)] @@ -250,34 +232,54 @@ def _validate_seasons(seasons): for season in seasons: c.update(_months_in_season(season)) # Make a list of months that are not present... - not_present = [calendar.month_abbr[month] for month in range(1, 13) - if month not in c] + not_present = [ + calendar.month_abbr[month] for month in range(1, 13) if month not in c + ] if not_present: - raise ValueError('some months do not appear in any season: ' - '{!s}'.format(', '.join(not_present))) + raise ValueError( + "some months do not appear in any season: {!s}".format( + ", ".join(not_present) + ) + ) # Make a list of months that appear multiple times... - multi_present = [calendar.month_abbr[month] for month in range(1, 13) - if c[month] > 1] + multi_present = [ + calendar.month_abbr[month] for month in range(1, 13) if c[month] > 1 + ] if multi_present: - raise ValueError('some months appear in more than one season: ' - '{!s}'.format(', '.join(multi_present))) + raise ValueError( + "some months appear in more than one season: {!s}".format( + ", ".join(multi_present) + ) + ) return -def _month_year_adjusts(seasons): +def _month_year_adjusts(seasons, use_year_at_season_start=False): """Compute the year adjustments required for each month. - These determine whether the month belongs to a season in the same - year or is in the start of a season that counts towards the next - year. + These adjustments ensure that no season spans two years by assigning months + to the **next** year (use_year_at_season_start is False) or the + **previous** year (use_year_at_season_start is True). E.g. Winter - djf: + either assign Dec to the next year, or Jan and Feb to the previous year. """ - month_year_adjusts = [None, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] + # 1 'slot' for each month, with an extra leading 'slot' because months + # are 1-indexed - January is 1, therefore corresponding to the 2nd + # array index. + month_year_adjusts = np.zeros(13, dtype=int) + for season in seasons: - months = _months_in_season(season) - for month in months: - if month > months[-1]: - month_year_adjusts[month] = 1 + months = np.array(_months_in_season(season)) + if use_year_at_season_start: + months_to_shift = months < months[0] + year_shift = -1 + else: + # Sending forwards. + months_to_shift = months > months[-1] + year_shift = 1 + indices_to_shift = months[np.flatnonzero(months_to_shift)] + month_year_adjusts[indices_to_shift] = year_shift + return month_year_adjusts @@ -295,25 +297,19 @@ def _month_season_numbers(seasons): return month_season_numbers -def add_season(cube, coord, name='season', - seasons=('djf', 'mam', 'jja', 'son')): - """ - Add a categorical season-of-year coordinate, with user specified - seasons. - - Args: +def add_season(cube, coord, name="season", seasons=("djf", "mam", "jja", "son")): + """Add a categorical season-of-year coordinate, with user specified seasons. - * cube (:class:`iris.cube.Cube`): + Parameters + ---------- + cube : :class:`iris.cube.Cube` The cube containing 'coord'. The new coord will be added into it. - * coord (:class:`iris.coords.Coord` or string): + coord : :class:`iris.coords.Coord` or str Coordinate in 'cube', or its name, representing time. - - Kwargs: - - * name (string): + name : str, default="season" Name of the created coordinate. Defaults to "season". 
-    * seasons (:class:`list` of strings):
+    seasons : :class:`list` of str, optional
         List of seasons defined by month abbreviations. Each month must
         appear once and only once. Defaults to standard meteorological
         seasons ('djf', 'mam', 'jja', 'son').
@@ -326,33 +322,31 @@ def add_season(cube, coord, name='season',
     month_season_numbers = _month_season_numbers(seasons)
 
     # Define a categorisation function.
-    def _season(coord, value):
-        dt = _pt_date(coord, value)
-        return seasons[month_season_numbers[dt.month]]
+    def _season(_, value: cftime.datetime) -> str:
+        return seasons[month_season_numbers[value.month]]
 
     # Apply the categorisation.
-    add_categorised_coord(cube, name, coord, _season, units='no_unit')
+    add_categorised_coord(cube, name, coord, _season, units="no_unit")
 
 
-def add_season_number(cube, coord, name='season_number',
-                      seasons=('djf', 'mam', 'jja', 'son')):
-    """
+def add_season_number(
+    cube, coord, name="season_number", seasons=("djf", "mam", "jja", "son")
+):
+    """Add a categorical season-of-year coordinate.
+
     Add a categorical season-of-year coordinate, values 0..N-1 where
     N is the number of user specified seasons.
 
-    Args:
-
-    * cube (:class:`iris.cube.Cube`):
+    Parameters
+    ----------
+    cube : :class:`iris.cube.Cube`
         The cube containing 'coord'. The new coord will be added into it.
-    * coord (:class:`iris.coords.Coord` or string):
+    coord : :class:`iris.coords.Coord` or str
         Coordinate in 'cube', or its name, representing time.
-
-    Kwargs:
-
-    * name (string):
+    name : str, default="season_number"
         Name of the created coordinate. Defaults to "season_number".
-    * seasons (:class:`list` of strings):
+    seasons : :class:`list` of str, optional
         List of seasons defined by month abbreviations. Each month must
        appear once and only once.
Defaults to standard meteorological - seasons ('djf', 'mam', 'jja', 'son'). + seasons (``djf``, ``mam``, ``jja``, ``son``). + use_year_at_season_start : bool, default=False + Seasons spanning the year boundary (e.g. Winter ``djf``) will belong + fully to the following year by default (e.g. the year of Jan and Feb). + Set to ``True`` for spanning seasons to belong to the preceding + year (e.g. the year of Dec) instead. """ # Check that the seasons are valid. _validate_seasons(seasons) # Define the adjustments to be made to the year. - month_year_adjusts = _month_year_adjusts(seasons) + month_year_adjusts = _month_year_adjusts( + seasons, use_year_at_season_start=use_year_at_season_start + ) # Define a categorisation function. - def _season_year(coord, value): - dt = _pt_date(coord, value) - year = dt.year - year += month_year_adjusts[dt.month] + def _season_year(_, value: cftime.datetime) -> int: + year = value.year + year += month_year_adjusts[value.month] return year # Apply the categorisation. add_categorised_coord(cube, name, coord, _season_year) -def add_season_membership(cube, coord, season, name='season_membership'): - """ - Add a categorical season membership coordinate for a user specified - season. +def add_season_membership(cube, coord, season, name="season_membership"): + """Add a categorical season membership coordinate for a user specified season. The coordinate has the value True for every time that is within the given season, and the value False otherwise. - Args: - - * cube (:class:`iris.cube.Cube`): + Parameters + ---------- + cube : :class:`iris.cube.Cube` The cube containing 'coord'. The new coord will be added into it. - * coord (:class:`iris.coords.Coord` or string): + coord : :class:`iris.coords.Coord` or str Coordinate in 'cube', or its name, representing time. - * season (string): + season : str Season defined by month abbreviations. - - Kwargs: - - * name (string): + name : str, default="season_membership" Name of the created coordinate. Defaults to "season_membership". """ months = _months_in_season(season) - def _season_membership(coord, value): - dt = _pt_date(coord, value) - if dt.month in months: - return True - return False + def _season_membership(_, value: cftime.datetime) -> bool: + return value.month in months add_categorised_coord(cube, name, coord, _season_membership) diff --git a/lib/iris/coord_systems.py b/lib/iris/coord_systems.py index eab49e36de..4da46ae249 100644 --- a/lib/iris/coord_systems.py +++ b/lib/iris/coord_systems.py @@ -1,47 +1,78 @@ -# (C) British Crown Copyright 2010 - 2019, Met Office +# Copyright Iris contributors # -# This file is part of Iris. -# -# Iris is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the -# Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Iris is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with Iris. If not, see . -""" -Definitions of coordinate systems. - -""" - -from __future__ import (absolute_import, division, print_function) -from six.moves import (filter, input, map, range, zip) # noqa -import six +# This file is part of Iris and is released under the BSD license. 
+# See LICENSE in the root of the repository for full licensing details. +"""Definitions of coordinate systems.""" from abc import ABCMeta, abstractmethod +from functools import cached_property +import re +from typing import ClassVar import warnings -import numpy as np -import cartopy import cartopy.crs as ccrs +import numpy as np +from iris._deprecation import warn_deprecated +import iris.warnings -class CoordSystem(six.with_metaclass(ABCMeta, object)): - """ - Abstract base class for coordinate systems. - """ +def _arg_default(value, default, cast_as=float): + """Apply a default value and type for an optional kwarg.""" + if value is None: + value = default + value = cast_as(value) + return value - grid_mapping_name = None + +def _1or2_parallels(arg): + """Accept 1 or 2 inputs as a tuple of 1 or 2 floats.""" + try: + values_tuple = tuple(arg) + except TypeError: + values_tuple = (arg,) + values_tuple = tuple([float(x) for x in values_tuple]) + nvals = len(values_tuple) + if nvals not in (1, 2): + emsg = "Allows only 1 or 2 parallels or secant latitudes : got {!r}" + raise ValueError(emsg.format(arg)) + return values_tuple + + +def _float_or_None(arg): + """Cast as float, except for allowing None as a distinct valid value.""" + if arg is not None: + arg = float(arg) + return arg + + +class CoordSystem(metaclass=ABCMeta): + """Abstract base class for coordinate systems.""" + + grid_mapping_name: ClassVar[str | None] = None def __eq__(self, other): - return (self.__class__ == other.__class__ and - self.__dict__ == other.__dict__) + """Override equality. + + The `_globe` and `_crs` attributes are not compared because they are + cached properties and completely derived from other attributes. The + nature of caching means that they can appear on one object and not on + another despite the objects being identical, and them being completely + derived from other attributes means they will only differ if other + attributes that are being tested for equality differ. 
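+
+        A minimal usage sketch (illustrative only)::
+
+            cs1 = GeogCS(6371229)
+            cs2 = GeogCS(6371229)
+            cs1.as_cartopy_crs()  # builds and caches cs1._crs and cs1._globe
+            assert cs1 == cs2     # cached attributes are excluded from comparison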
+ """ + if self.__class__ != other.__class__: + return False + self_keys = set(self.__dict__.keys()) + other_keys = set(other.__dict__.keys()) + check_keys = (self_keys | other_keys) - {"_globe", "_crs"} + for key in check_keys: + try: + if self.__dict__[key] != other.__dict__[key]: + return False + except KeyError: + return False + return True def __ne__(self, other): # Must supply __ne__, Python does not defer to __eq__ for @@ -49,15 +80,13 @@ def __ne__(self, other): return not (self == other) def xml_element(self, doc, attrs=None): - """Default behaviour for coord systems.""" + """Perform default behaviour for coord systems.""" # attrs - optional list of (k,v) items, used for alternate output xml_element_name = type(self).__name__ # lower case the first char first_char = xml_element_name[0] - xml_element_name = xml_element_name.replace(first_char, - first_char.lower(), - 1) + xml_element_name = xml_element_name.replace(first_char, first_char.lower(), 1) coord_system_xml_element = doc.createElement(xml_element_name) @@ -67,11 +96,11 @@ def xml_element(self, doc, attrs=None): for name, value in attrs: if isinstance(value, float): - value_str = '{:.16}'.format(value) + value_str = "{:.16}".format(value) elif isinstance(value, np.float32): - value_str = '{:.8}'.format(value) + value_str = "{:.8}".format(value) else: - value_str = '{}'.format(value) + value_str = "{}".format(value) coord_system_xml_element.setAttribute(name, value_str) return coord_system_xml_element @@ -87,17 +116,12 @@ def _ellipsoid_to_globe(ellipsoid, globe_default): @abstractmethod def as_cartopy_crs(self): - """ - Return a cartopy CRS representing our native coordinate - system. - - """ + """Return a cartopy CRS representing our native coordinate system.""" pass @abstractmethod def as_cartopy_projection(self): - """ - Return a cartopy projection representing our native map. + """Return a cartopy projection representing our native map. This will be the same as the :func:`~CoordSystem.as_cartopy_crs` for map projections but for spherical coord systems (which are not map @@ -107,40 +131,58 @@ def as_cartopy_projection(self): pass +_short_datum_names = { + "OSGB 1936": "OSGB36", + "OSGB_1936": "OSGB36", + "WGS 84": "WGS84", +} + + class GeogCS(CoordSystem): - """ + """A geographic (ellipsoidal) coordinate system. + A geographic (ellipsoidal) coordinate system, defined by the shape of the Earth and a prime meridian. - """ grid_mapping_name = "latitude_longitude" - def __init__(self, semi_major_axis=None, semi_minor_axis=None, - inverse_flattening=None, longitude_of_prime_meridian=0): - """ - Creates a new GeogCS. - - Kwargs: - - * semi_major_axis - of ellipsoid in metres - * semi_minor_axis - of ellipsoid in metres - * inverse_flattening - of ellipsoid - * longitude_of_prime_meridian - Can be used to specify the - prime meridian on the ellipsoid - in degrees. Default = 0. - + def __init__( + self, + semi_major_axis=None, + semi_minor_axis=None, + inverse_flattening=None, + longitude_of_prime_meridian=None, + ): + """Create a new GeogCS. + + Parameters + ---------- + semi_major_axis, semi_minor_axis : optional + Axes of ellipsoid, in metres. At least one must be given (see note + below). + inverse_flattening : optional + Can be omitted if both axes given (see note below). Default 0.0. + longitude_of_prime_meridian : optional + Specifies the prime meridian on the ellipsoid, in degrees. Default 0.0. 
+ + Notes + ----- If just semi_major_axis is set, with no semi_minor_axis or inverse_flattening, then a perfect sphere is created from the given radius. - If just two of semi_major_axis, semi_minor_axis, and - inverse_flattening are given the missing element is calulated from the - formula: + If just two of semi_major_axis, semi_minor_axis, and inverse_flattening + are given the missing element is calculated from the formula: :math:`flattening = (major - minor) / major` Currently, Iris will not allow over-specification (all three ellipsoid - paramaters). + parameters). + + After object creation, altering any of these properties will not update + the others. semi_major_axis and semi_minor_axis are used when creating + Cartopy objects. + Examples:: cs = GeogCS(6371229) @@ -153,65 +195,92 @@ def __init__(self, semi_major_axis=None, semi_minor_axis=None, """ # No ellipsoid specified? (0 0 0) - if ((semi_major_axis is None) and (semi_minor_axis is None) and - (inverse_flattening is None)): + if ( + (semi_major_axis is None) + and (semi_minor_axis is None) + and (inverse_flattening is None) + ): raise ValueError("No ellipsoid specified") # Ellipsoid over-specified? (1 1 1) - if ((semi_major_axis is not None) and (semi_minor_axis is not None) and - (inverse_flattening is not None)): + if ( + (semi_major_axis is not None) + and (semi_minor_axis is not None) + and (inverse_flattening is not None) + ): raise ValueError("Ellipsoid is overspecified") # Perfect sphere (semi_major_axis only)? (1 0 0) - elif (semi_major_axis is not None and (semi_minor_axis is None and - inverse_flattening is None)): + elif semi_major_axis is not None and ( + semi_minor_axis is None and not inverse_flattening + ): semi_minor_axis = semi_major_axis inverse_flattening = 0.0 # Calculate semi_major_axis? (0 1 1) - elif semi_major_axis is None and (semi_minor_axis is not None and - inverse_flattening is not None): - semi_major_axis = -semi_minor_axis / ((1.0 - inverse_flattening) / - inverse_flattening) + elif semi_major_axis is None and ( + semi_minor_axis is not None and inverse_flattening is not None + ): + semi_major_axis = -semi_minor_axis / ( + (1.0 - inverse_flattening) / inverse_flattening + ) # Calculate semi_minor_axis? (1 0 1) - elif semi_minor_axis is None and (semi_major_axis is not None and - inverse_flattening is not None): - semi_minor_axis = semi_major_axis - ((1.0 / inverse_flattening) * - semi_major_axis) + elif semi_minor_axis is None and ( + semi_major_axis is not None and inverse_flattening is not None + ): + semi_minor_axis = semi_major_axis - ( + (1.0 / inverse_flattening) * semi_major_axis + ) # Calculate inverse_flattening? (1 1 0) - elif inverse_flattening is None and (semi_major_axis is not None and - semi_minor_axis is not None): + elif inverse_flattening is None and ( + semi_major_axis is not None and semi_minor_axis is not None + ): if semi_major_axis == semi_minor_axis: inverse_flattening = 0.0 else: inverse_flattening = 1.0 / ( - (semi_major_axis - semi_minor_axis) / semi_major_axis) + (semi_major_axis - semi_minor_axis) / semi_major_axis + ) # We didn't get enough to specify an ellipse. else: raise ValueError("Insufficient ellipsoid specification") #: Major radius of the ellipsoid in metres. - self.semi_major_axis = float(semi_major_axis) + self._semi_major_axis = float(semi_major_axis) #: Minor radius of the ellipsoid in metres. 
- self.semi_minor_axis = float(semi_minor_axis) + self._semi_minor_axis = float(semi_minor_axis) - #: :math:`1/f` where :math:`f = (a-b)/a` - self.inverse_flattening = float(inverse_flattening) + #: :math:`1/f` where :math:`f = (a-b)/a`. + self._inverse_flattening = float(inverse_flattening) + + self._datum = None #: Describes 'zero' on the ellipsoid in degrees. - self.longitude_of_prime_meridian = float(longitude_of_prime_meridian) + self.longitude_of_prime_meridian = _arg_default(longitude_of_prime_meridian, 0) def _pretty_attrs(self): attrs = [("semi_major_axis", self.semi_major_axis)] if self.semi_major_axis != self.semi_minor_axis: attrs.append(("semi_minor_axis", self.semi_minor_axis)) if self.longitude_of_prime_meridian != 0.0: - attrs.append(("longitude_of_prime_meridian", - self.longitude_of_prime_meridian)) + attrs.append( + ( + "longitude_of_prime_meridian", + self.longitude_of_prime_meridian, + ) + ) + # An unknown crs datum will be treated as None + if self.datum is not None and self.datum != "unknown": + attrs.append( + ( + "datum", + self.datum, + ) + ) return attrs def __repr__(self): @@ -220,24 +289,23 @@ def __repr__(self): if len(attrs) == 1 and attrs[0][0] == "semi_major_axis": return "GeogCS(%r)" % self.semi_major_axis else: - return "GeogCS(%s)" % ", ".join( - ["%s=%r" % (k, v) for k, v in attrs]) + return "GeogCS(%s)" % ", ".join(["%s=%r" % (k, v) for k, v in attrs]) def __str__(self): attrs = self._pretty_attrs() # Special case for 1 pretty attr if len(attrs) == 1 and attrs[0][0] == "semi_major_axis": - return 'GeogCS({:.16})'.format(self.semi_major_axis) + return "GeogCS({:.16})".format(self.semi_major_axis) else: text_attrs = [] for k, v in attrs: if isinstance(v, float): - text_attrs.append('{}={:.16}'.format(k, v)) + text_attrs.append("{}={:.16}".format(k, v)) elif isinstance(v, np.float32): - text_attrs.append('{}={:.8}'.format(k, v)) + text_attrs.append("{}={:.8}".format(k, v)) else: - text_attrs.append('{}={}'.format(k, v)) - return 'GeogCS({})'.format(', '.join(text_attrs)) + text_attrs.append("{}={}".format(k, v)) + return "GeogCS({})".format(", ".join(text_attrs)) def xml_element(self, doc): # Special output for spheres @@ -248,48 +316,206 @@ def xml_element(self, doc): return CoordSystem.xml_element(self, doc, attrs) def as_cartopy_crs(self): - return ccrs.Geodetic(self.as_cartopy_globe()) + return self._crs def as_cartopy_projection(self): - return ccrs.PlateCarree() + return ccrs.PlateCarree( + central_longitude=self.longitude_of_prime_meridian, + globe=self.as_cartopy_globe(), + ) def as_cartopy_globe(self): - # Explicitly set `ellipse` to None as a workaround for - # Cartopy setting WGS84 as the default. - return ccrs.Globe(semimajor_axis=self.semi_major_axis, - semiminor_axis=self.semi_minor_axis, - ellipse=None) + return self._globe + @cached_property + def _globe(self): + """A representation of this CRS as a Cartopy Globe. -class RotatedGeogCS(CoordSystem): - """ - A coordinate system with rotated pole, on an optional :class:`GeogCS`. + Note + ---- + This property is created when required and then cached for speed. That + cached value is cleared when an assignment is made to a property of the + class that invalidates the cache. 
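+
+        A minimal usage sketch (illustrative only)::
+
+            cs = GeogCS(6371229)
+            globe = cs.as_cartopy_globe()   # created on first access, then cached
+            cs.semi_major_axis = 6371000.0  # assignment wipes the cached Globe
+            globe = cs.as_cartopy_globe()   # rebuilt with the new value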
+        """
+        if self._datum is not None:
+            short_datum = _short_datum_names.get(self._datum, self._datum)
+            # Cartopy doesn't actually enact datums unless they're provided without
+            # ellipsoid axes, so only provide the datum
+            return ccrs.Globe(short_datum, ellipse=None)
+        else:
+            return ccrs.Globe(
+                ellipse=None,
+                semimajor_axis=self._semi_major_axis,
+                semiminor_axis=self._semi_minor_axis,
+            )
+
+    @cached_property
+    def _crs(self):
+        """A representation of this CRS as a Cartopy CRS.
+
+        Note
+        ----
+        This property is created when required and then cached for speed. That
+        cached value is cleared when an assignment is made to a property of the
+        class that invalidates the cache.
 
-    """
+        """
+        return ccrs.Geodetic(self._globe)
 
-    grid_mapping_name = "rotated_latitude_longitude"
+    def _wipe_cached_properties(self):
+        """Wipe the cached properties on the object.
+
+        Wipe the cached properties on the object as part of any update to a
+        value that invalidates the cache.
 
-    def __init__(self, grid_north_pole_latitude, grid_north_pole_longitude,
-                 north_pole_grid_longitude=0, ellipsoid=None):
         """
-        Constructs a coordinate system with rotated pole, on an
-        optional :class:`GeogCS`.
+        try:
+            delattr(self, "_crs")
+        except AttributeError:
+            pass
+        try:
+            delattr(self, "_globe")
+        except AttributeError:
+            pass
+
+    @property
+    def semi_major_axis(self):
+        if self._semi_major_axis is not None:
+            return self._semi_major_axis
+        else:
+            return self._crs.ellipsoid.semi_major_metre
 
-        Args:
+    @semi_major_axis.setter
+    def semi_major_axis(self, value):
+        """Assign semi_major_axis.
 
-        * grid_north_pole_latitude - The true latitude of the rotated
-                                     pole in degrees.
-        * grid_north_pole_longitude - The true longitude of the rotated
-                                      pole in degrees.
+        Setting this property to a different value invalidates the current datum
+        (if any) because a datum encodes a specific semi-major axis. This also
+        invalidates the cached `cartopy.Globe` and `cartopy.CRS`.
+
+        """
+        value = float(value)
+        if not np.isclose(self.semi_major_axis, value):
+            self._datum = None
+            self._wipe_cached_properties()
+        self._semi_major_axis = value
+
+    @property
+    def semi_minor_axis(self):
+        if self._semi_minor_axis is not None:
+            return self._semi_minor_axis
+        else:
+            return self._crs.ellipsoid.semi_minor_metre
 
-        Kwargs:
+    @semi_minor_axis.setter
+    def semi_minor_axis(self, value):
+        """Assign semi_minor_axis.
 
-        * north_pole_grid_longitude - Longitude of true north pole in
-                                      rotated grid in degrees. Default = 0.
-        * ellipsoid - Optional :class:`GeogCS` defining
-                      the ellipsoid.
+        Setting this property to a different value invalidates the current datum
+        (if any) because a datum encodes a specific semi-minor axis. This also
+        invalidates the cached `cartopy.Globe` and `cartopy.CRS`.
+
+        """
+        value = float(value)
+        if not np.isclose(self.semi_minor_axis, value):
+            self._datum = None
+            self._wipe_cached_properties()
+        self._semi_minor_axis = value
+
+    @property
+    def inverse_flattening(self):
+        if self._inverse_flattening is not None:
+            return self._inverse_flattening
+        else:
+            return self._crs.ellipsoid.inverse_flattening
+
+    @inverse_flattening.setter
+    def inverse_flattening(self, value):
+        """Assign inverse_flattening.
+
+        Setting this property to a different value does not affect the behaviour
+        of this object any further than the value of this property.
+
+        """
+        wmsg = (
+            "Setting inverse_flattening does not affect other properties of "
+            "the GeogCS object. To change other properties set them explicitly"
+            " or create a new GeogCS instance."
+ ) + warnings.warn(wmsg, category=iris.warnings.IrisUserWarning) + value = float(value) + self._inverse_flattening = value + + @property + def datum(self): + if self._datum is None: + return None + else: + datum = self._datum + return datum + + @datum.setter + def datum(self, value): + """Assign datum. + + Setting this property to a different value invalidates the current + values of the ellipsoid measurements because a datum encodes its own + ellipse. This also invalidates the cached `cartopy.Globe` and + `cartopy.CRS`. + + """ + if self._datum != value: + self._semi_major_axis = None + self._semi_minor_axis = None + self._inverse_flattening = None + self._wipe_cached_properties() + self._datum = value + + @classmethod + def from_datum(cls, datum, longitude_of_prime_meridian=None): + crs = super().__new__(cls) + + crs._semi_major_axis = None + crs._semi_minor_axis = None + crs._inverse_flattening = None + + #: Describes 'zero' on the ellipsoid in degrees. + crs.longitude_of_prime_meridian = _arg_default(longitude_of_prime_meridian, 0) + + crs._datum = datum + + return crs - Examples:: + +class RotatedGeogCS(CoordSystem): + """A coordinate system with rotated pole, on an optional :class:`GeogCS`.""" + + grid_mapping_name = "rotated_latitude_longitude" + + def __init__( + self, + grid_north_pole_latitude, + grid_north_pole_longitude, + north_pole_grid_longitude=None, + ellipsoid=None, + ): + """Construct a coordinate system with rotated pole, on an optional :class:`GeogCS`. + + Parameters + ---------- + grid_north_pole_latitude : + The true latitude of the rotated pole in degrees. + grid_north_pole_longitude : + The true longitude of the rotated pole in degrees. + north_pole_grid_longitude : optional + Longitude of true north pole in rotated grid, in degrees. + Defaults to 0.0. + ellipsoid : :class:`GeogCS`, optional + If given, defines the ellipsoid. + + Examples + -------- + :: rotated_cs = RotatedGeogCS(30, 30) another_cs = RotatedGeogCS(30, 30, @@ -303,25 +529,25 @@ def __init__(self, grid_north_pole_latitude, grid_north_pole_longitude, self.grid_north_pole_longitude = float(grid_north_pole_longitude) #: Longitude of true north pole in rotated grid in degrees. - self.north_pole_grid_longitude = float(north_pole_grid_longitude) + self.north_pole_grid_longitude = _arg_default(north_pole_grid_longitude, 0) - #: Ellipsoid definition. + #: Ellipsoid definition (:class:`GeogCS` or None). 
self.ellipsoid = ellipsoid def _pretty_attrs(self): - attrs = [("grid_north_pole_latitude", self.grid_north_pole_latitude), - ("grid_north_pole_longitude", self.grid_north_pole_longitude)] + attrs = [ + ("grid_north_pole_latitude", self.grid_north_pole_latitude), + ("grid_north_pole_longitude", self.grid_north_pole_longitude), + ] if self.north_pole_grid_longitude != 0.0: - attrs.append(("north_pole_grid_longitude", - self.north_pole_grid_longitude)) + attrs.append(("north_pole_grid_longitude", self.north_pole_grid_longitude)) if self.ellipsoid is not None: attrs.append(("ellipsoid", self.ellipsoid)) return attrs def __repr__(self): attrs = self._pretty_attrs() - result = "RotatedGeogCS(%s)" % ", ".join( - ["%s=%r" % (k, v) for k, v in attrs]) + result = "RotatedGeogCS(%s)" % ", ".join(["%s=%r" % (k, v) for k, v in attrs]) # Extra prettiness result = result.replace("grid_north_pole_latitude=", "") result = result.replace("grid_north_pole_longitude=", "") @@ -332,12 +558,12 @@ def __str__(self): text_attrs = [] for k, v in attrs: if isinstance(v, float): - text_attrs.append('{}={:.16}'.format(k, v)) + text_attrs.append("{}={:.16}".format(k, v)) elif isinstance(v, np.float32): - text_attrs.append('{}={:.8}'.format(k, v)) + text_attrs.append("{}={:.8}".format(k, v)) else: - text_attrs.append('{}={}'.format(k, v)) - result = 'RotatedGeogCS({})'.format(', '.join(text_attrs)) + text_attrs.append("{}={}".format(k, v)) + result = "RotatedGeogCS({})".format(", ".join(text_attrs)) # Extra prettiness result = result.replace("grid_north_pole_latitude=", "") result = result.replace("grid_north_pole_longitude=", "") @@ -348,20 +574,13 @@ def xml_element(self, doc): def _ccrs_kwargs(self): globe = self._ellipsoid_to_globe(self.ellipsoid, None) + cartopy_kwargs = { + "central_rotated_longitude": self.north_pole_grid_longitude, + "pole_longitude": self.grid_north_pole_longitude, + "pole_latitude": self.grid_north_pole_latitude, + "globe": globe, + } - # Cartopy v0.12 provided the new arg north_pole_grid_longitude - cartopy_kwargs = {'pole_longitude': self.grid_north_pole_longitude, - 'pole_latitude': self.grid_north_pole_latitude, - 'globe': globe} - - if cartopy.__version__ < '0.12': - warnings.warn('"central_rotated_longitude" is not supported by ' - 'cartopy{} and has been ignored in the ' - 'creation of the cartopy ' - 'projection/crs.'.format(cartopy.__version__)) - else: - crl = 'central_rotated_longitude' - cartopy_kwargs[crl] = self.north_pole_grid_longitude return cartopy_kwargs def as_cartopy_crs(self): @@ -372,44 +591,44 @@ def as_cartopy_projection(self): class TransverseMercator(CoordSystem): - """ - A cylindrical map projection, with XY coordinates measured in metres. - - """ + """A cylindrical map projection, with XY coordinates measured in metres.""" grid_mapping_name = "transverse_mercator" - def __init__(self, latitude_of_projection_origin, - longitude_of_central_meridian, false_easting, false_northing, - scale_factor_at_central_meridian, ellipsoid=None): - """ - Constructs a TransverseMercator object. - - Args: - - * latitude_of_projection_origin - True latitude of planar origin in degrees. - - * longitude_of_central_meridian - True longitude of planar origin in degrees. - - * false_easting - X offset from planar origin in metres. - - * false_northing - Y offset from planar origin in metres. - - * scale_factor_at_central_meridian - Reduces the cylinder to slice through the ellipsoid - (secant form). Used to provide TWO longitudes of zero - distortion in the area of interest. 
- - Kwargs: - - * ellipsoid - Optional :class:`GeogCS` defining the ellipsoid. - - Example:: + def __init__( + self, + latitude_of_projection_origin, + longitude_of_central_meridian, + false_easting=None, + false_northing=None, + scale_factor_at_central_meridian=None, + ellipsoid=None, + ): + """Construct a TransverseMercator object. + + Parameters + ---------- + latitude_of_projection_origin : + True latitude of planar origin in degrees. + longitude_of_central_meridian : + True longitude of planar origin in degrees. + false_easting : optional + X offset from planar origin in metres. + Defaults to 0.0. + false_northing : optional + Y offset from planar origin in metres. + Defaults to 0.0. + scale_factor_at_central_meridian : optional + Reduces the cylinder to slice through the ellipsoid + (secant form). Used to provide TWO longitudes of zero + distortion in the area of interest. + Defaults to 1.0 . + ellipsoid : :class:`GeogCS`, optional + If given, defines the ellipsoid. + + Examples + -------- + :: airy1830 = GeogCS(6377563.396, 6356256.909) osgb = TransverseMercator(49, -2, 400000, -100000, 0.9996012717, @@ -417,36 +636,39 @@ def __init__(self, latitude_of_projection_origin, """ #: True latitude of planar origin in degrees. - self.latitude_of_projection_origin = float( - latitude_of_projection_origin) + self.latitude_of_projection_origin = float(latitude_of_projection_origin) #: True longitude of planar origin in degrees. - self.longitude_of_central_meridian = float( - longitude_of_central_meridian) + self.longitude_of_central_meridian = float(longitude_of_central_meridian) #: X offset from planar origin in metres. - self.false_easting = float(false_easting) + self.false_easting = _arg_default(false_easting, 0) #: Y offset from planar origin in metres. - self.false_northing = float(false_northing) + self.false_northing = _arg_default(false_northing, 0) - #: Reduces the cylinder to slice through the ellipsoid (secant form). - self.scale_factor_at_central_meridian = float( - scale_factor_at_central_meridian) + #: Scale factor at the centre longitude. + self.scale_factor_at_central_meridian = _arg_default( + scale_factor_at_central_meridian, 1.0 + ) - #: Ellipsoid definition. + #: Ellipsoid definition (:class:`GeogCS` or None). 
self.ellipsoid = ellipsoid def __repr__(self): - return "TransverseMercator(latitude_of_projection_origin={!r}, "\ - "longitude_of_central_meridian={!r}, false_easting={!r}, "\ - "false_northing={!r}, scale_factor_at_central_meridian={!r}, "\ - "ellipsoid={!r})".format(self.latitude_of_projection_origin, - self.longitude_of_central_meridian, - self.false_easting, - self.false_northing, - self.scale_factor_at_central_meridian, - self.ellipsoid) + return ( + "TransverseMercator(latitude_of_projection_origin={!r}, " + "longitude_of_central_meridian={!r}, false_easting={!r}, " + "false_northing={!r}, scale_factor_at_central_meridian={!r}, " + "ellipsoid={!r})".format( + self.latitude_of_projection_origin, + self.longitude_of_central_meridian, + self.false_easting, + self.false_northing, + self.scale_factor_at_central_meridian, + self.ellipsoid, + ) + ) def as_cartopy_crs(self): globe = self._ellipsoid_to_globe(self.ellipsoid, None) @@ -457,7 +679,8 @@ def as_cartopy_crs(self): false_easting=self.false_easting, false_northing=self.false_northing, scale_factor=self.scale_factor_at_central_meridian, - globe=globe) + globe=globe, + ) def as_cartopy_projection(self): return self.as_cartopy_crs() @@ -465,10 +688,17 @@ def as_cartopy_projection(self): class OSGB(TransverseMercator): """A Specific transverse mercator projection on a specific ellipsoid.""" + def __init__(self): - TransverseMercator.__init__(self, 49, -2, 400000, -100000, - 0.9996012717, - GeogCS(6377563.396, 6356256.909)) + TransverseMercator.__init__( + self, + 49, + -2, + 400000, + -100000, + 0.9996012717, + GeogCS(6377563.396, 6356256.909), + ) def as_cartopy_crs(self): return ccrs.OSGB() @@ -478,151 +708,149 @@ def as_cartopy_projection(self): class Orthographic(CoordSystem): - """ - An orthographic map projection. - - """ - - grid_mapping_name = 'orthographic' - - def __init__(self, latitude_of_projection_origin, - longitude_of_projection_origin, false_easting=0.0, - false_northing=0.0, ellipsoid=None): - """ - Constructs an Orthographic coord system. - - Args: - - * latitude_of_projection_origin: + """An orthographic map projection.""" + + grid_mapping_name = "orthographic" + + def __init__( + self, + latitude_of_projection_origin, + longitude_of_projection_origin, + false_easting=None, + false_northing=None, + ellipsoid=None, + ): + """Construct an Orthographic coord system. + + Parameters + ---------- + latitude_of_projection_origin : True latitude of planar origin in degrees. - - * longitude_of_projection_origin: + longitude_of_projection_origin : True longitude of planar origin in degrees. - - Kwargs: - - * false_easting - X offset from planar origin in metres. Defaults to 0. - - * false_northing - Y offset from planar origin in metres. Defaults to 0. - - * ellipsoid - :class:`GeogCS` defining the ellipsoid. + false_easting : optional + X offset from planar origin in metres. Defaults to 0.0. + false_northing : optional + Y offset from planar origin in metres. Defaults to 0.0. + ellipsoid : :class:`GeogCS`, optional + If given, defines the ellipsoid. """ #: True latitude of planar origin in degrees. - self.latitude_of_projection_origin = float( - latitude_of_projection_origin) + self.latitude_of_projection_origin = float(latitude_of_projection_origin) #: True longitude of planar origin in degrees. - self.longitude_of_projection_origin = float( - longitude_of_projection_origin) + self.longitude_of_projection_origin = float(longitude_of_projection_origin) #: X offset from planar origin in metres. 
- self.false_easting = float(false_easting) + self.false_easting = _arg_default(false_easting, 0) #: Y offset from planar origin in metres. - self.false_northing = float(false_northing) + self.false_northing = _arg_default(false_northing, 0) - #: Ellipsoid definition. + #: Ellipsoid definition (:class:`GeogCS` or None). self.ellipsoid = ellipsoid def __repr__(self): - return "Orthographic(latitude_of_projection_origin={!r}, "\ - "longitude_of_projection_origin={!r}, "\ - "false_easting={!r}, false_northing={!r}, "\ - "ellipsoid={!r})".format(self.latitude_of_projection_origin, - self.longitude_of_projection_origin, - self.false_easting, - self.false_northing, - self.ellipsoid) + return ( + "Orthographic(latitude_of_projection_origin={!r}, " + "longitude_of_projection_origin={!r}, " + "false_easting={!r}, false_northing={!r}, " + "ellipsoid={!r})".format( + self.latitude_of_projection_origin, + self.longitude_of_projection_origin, + self.false_easting, + self.false_northing, + self.ellipsoid, + ) + ) def as_cartopy_crs(self): globe = self._ellipsoid_to_globe(self.ellipsoid, ccrs.Globe()) - warnings.warn('Discarding false_easting and false_northing that are ' - 'not used by Cartopy.') + warnings.warn( + "Discarding false_easting and false_northing that are " + "not used by Cartopy.", + category=iris.warnings.IrisDefaultingWarning, + ) return ccrs.Orthographic( central_longitude=self.longitude_of_projection_origin, central_latitude=self.latitude_of_projection_origin, - globe=globe) + globe=globe, + ) def as_cartopy_projection(self): return self.as_cartopy_crs() class VerticalPerspective(CoordSystem): - """ - A vertical/near-side perspective satellite image map projection. - - """ - - grid_mapping_name = 'vertical_perspective' - - def __init__(self, latitude_of_projection_origin, - longitude_of_projection_origin, perspective_point_height, - false_easting=0, false_northing=0, ellipsoid=None): - """ - Constructs a Vertical Perspective coord system. - - Args: - - * latitude_of_projection_origin: + """A vertical/near-side perspective satellite image map projection.""" + + grid_mapping_name = "vertical_perspective" + + def __init__( + self, + latitude_of_projection_origin, + longitude_of_projection_origin, + perspective_point_height, + false_easting=None, + false_northing=None, + ellipsoid=None, + ): + """Construct a Vertical Perspective coord system. + + Parameters + ---------- + latitude_of_projection_origin : True latitude of planar origin in degrees. - - * longitude_of_projection_origin: + longitude_of_projection_origin : True longitude of planar origin in degrees. - - * perspective_point_height: + perspective_point_height : Altitude of satellite in metres above the surface of the ellipsoid. - - Kwargs: - - * false_easting - X offset from planar origin in metres. Defaults to 0. - - * false_northing - Y offset from planar origin in metres. Defaults to 0. - - * ellipsoid - :class:`GeogCS` defining the ellipsoid. + false_easting : optional + X offset from planar origin in metres. Defaults to 0.0. + false_northing : optional + Y offset from planar origin in metres. Defaults to 0.0. + ellipsoid : :class:`GeogCS`, optional + If given, defines the ellipsoid. """ #: True latitude of planar origin in degrees. - self.latitude_of_projection_origin = float( - latitude_of_projection_origin) + self.latitude_of_projection_origin = float(latitude_of_projection_origin) #: True longitude of planar origin in degrees. 
- self.longitude_of_projection_origin = float( - longitude_of_projection_origin) + self.longitude_of_projection_origin = float(longitude_of_projection_origin) #: Altitude of satellite in metres. - # test if perspective_point_height may be cast to float for proj.4 self.perspective_point_height = float(perspective_point_height) + # TODO: test if may be cast to float for proj.4 #: X offset from planar origin in metres. - self.false_easting = float(false_easting) + self.false_easting = _arg_default(false_easting, 0) #: Y offset from planar origin in metres. - self.false_northing = float(false_northing) + self.false_northing = _arg_default(false_northing, 0) - #: Ellipsoid definition. + #: Ellipsoid definition (:class:`GeogCS` or None). self.ellipsoid = ellipsoid def __repr__(self): - return "Vertical Perspective(latitude_of_projection_origin={!r}, "\ - "longitude_of_projection_origin={!r}, "\ - "perspective_point_height={!r}, "\ - "false_easting={!r}, false_northing={!r}, "\ - "ellipsoid={!r})".format(self.latitude_of_projection_origin, - self.longitude_of_projection_origin, - self.perspective_point_height, - self.false_easting, - self.false_northing, - self.ellipsoid) + return ( + "Vertical Perspective(latitude_of_projection_origin={!r}, " + "longitude_of_projection_origin={!r}, " + "perspective_point_height={!r}, " + "false_easting={!r}, false_northing={!r}, " + "ellipsoid={!r})".format( + self.latitude_of_projection_origin, + self.longitude_of_projection_origin, + self.perspective_point_height, + self.false_easting, + self.false_northing, + self.ellipsoid, + ) + ) def as_cartopy_crs(self): globe = self._ellipsoid_to_globe(self.ellipsoid, ccrs.Globe()) @@ -633,94 +861,92 @@ def as_cartopy_crs(self): satellite_height=self.perspective_point_height, false_easting=self.false_easting, false_northing=self.false_northing, - globe=globe) + globe=globe, + ) def as_cartopy_projection(self): return self.as_cartopy_crs() class Geostationary(CoordSystem): - """ - A geostationary satellite image map projection. - - """ - - grid_mapping_name = 'geostationary' - - def __init__(self, latitude_of_projection_origin, - longitude_of_projection_origin, - perspective_point_height, sweep_angle_axis, false_easting=0, - false_northing=0, ellipsoid=None): - - """ - Constructs a Geostationary coord system. - - Args: - - * latitude_of_projection_origin (float): + """A geostationary satellite image map projection.""" + + grid_mapping_name = "geostationary" + + def __init__( + self, + latitude_of_projection_origin, + longitude_of_projection_origin, + perspective_point_height, + sweep_angle_axis, + false_easting=None, + false_northing=None, + ellipsoid=None, + ): + """Construct a Geostationary coord system. + + Parameters + ---------- + latitude_of_projection_origin : True latitude of planar origin in degrees. - - * longitude_of_projection_origin (float): + longitude_of_projection_origin : True longitude of planar origin in degrees. - - * perspective_point_height (float): + perspective_point_height : Altitude of satellite in metres above the surface of the ellipsoid. - - * sweep_angle_axis (string): + sweep_angle_axis : str The axis along which the satellite instrument sweeps - 'x' or 'y'. - - Kwargs: - - * false_easting (float): - X offset from planar origin in metres. Defaults to 0. - - * false_northing (float): - Y offset from planar origin in metres. Defaults to 0. - - * ellipsoid (iris.coord_systems.GeogCS): - :class:`GeogCS` defining the ellipsoid. + false_easting : optional + X offset from planar origin in metres. 
Defaults to 0.0.
+        false_northing : optional
+            Y offset from planar origin in metres. Defaults to 0.0.
+        ellipsoid : :class:`GeogCS`, optional
+            If given, defines the ellipsoid.
 
         """
         #: True latitude of planar origin in degrees.
-        self.latitude_of_projection_origin = float(
-            latitude_of_projection_origin)
+        self.latitude_of_projection_origin = float(latitude_of_projection_origin)
         if self.latitude_of_projection_origin != 0.0:
-            raise ValueError('Non-zero latitude of projection currently not'
-                             ' supported by Cartopy.')
+            raise ValueError(
+                "Non-zero latitude of projection currently not supported by Cartopy."
+            )
 
         #: True longitude of planar origin in degrees.
-        self.longitude_of_projection_origin = float(
-            longitude_of_projection_origin)
+        self.longitude_of_projection_origin = float(longitude_of_projection_origin)
 
         #: Altitude of satellite in metres.
-        # test if perspective_point_height may be cast to float for proj.4
         self.perspective_point_height = float(perspective_point_height)
+        # TODO: test if may be cast to float for proj.4
 
         #: X offset from planar origin in metres.
-        self.false_easting = float(false_easting)
+        self.false_easting = _arg_default(false_easting, 0)
 
         #: Y offset from planar origin in metres.
-        self.false_northing = float(false_northing)
+        self.false_northing = _arg_default(false_northing, 0)
 
-        #: The axis along which the satellite instrument sweeps - 'x' or 'y'.
+        #: The sweep angle axis (string 'x' or 'y').
         self.sweep_angle_axis = sweep_angle_axis
-        if self.sweep_angle_axis not in ('x', 'y'):
+        if self.sweep_angle_axis not in ("x", "y"):
             raise ValueError('Invalid sweep_angle_axis - must be "x" or "y"')
 
-        #: Ellipsoid definition.
+        #: Ellipsoid definition (:class:`GeogCS` or None).
         self.ellipsoid = ellipsoid
 
     def __repr__(self):
-        return "Geostationary(latitude_of_projection_origin={!r}, " \
-               "longitude_of_projection_origin={!r}, " \
-               "perspective_point_height={!r}, false_easting={!r}, " \
-               "false_northing={!r}, sweep_angle_axis={!r}, " \
-               "ellipsoid={!r}".format(self.latitude_of_projection_origin,
-                                       self.longitude_of_projection_origin,
-                                       self.perspective_point_height,
-                                       self.false_easting,
-                                       self.false_northing,
-                                       self.sweep_angle_axis, self.ellipsoid)
+        return (
+            "Geostationary(latitude_of_projection_origin={!r}, "
+            "longitude_of_projection_origin={!r}, "
+            "perspective_point_height={!r}, false_easting={!r}, "
+            "false_northing={!r}, sweep_angle_axis={!r}, "
+            "ellipsoid={!r})".format(
+                self.latitude_of_projection_origin,
+                self.longitude_of_projection_origin,
+                self.perspective_point_height,
+                self.false_easting,
+                self.false_northing,
+                self.sweep_angle_axis,
+                self.ellipsoid,
+            )
+        )
 
     def as_cartopy_crs(self):
         globe = self._ellipsoid_to_globe(self.ellipsoid, ccrs.Globe())
@@ -731,50 +957,53 @@ def as_cartopy_crs(self):
             false_easting=self.false_easting,
             false_northing=self.false_northing,
             globe=globe,
-            sweep_axis=self.sweep_angle_axis)
+            sweep_axis=self.sweep_angle_axis,
+        )
 
     def as_cartopy_projection(self):
         return self.as_cartopy_crs()
 
 
 class Stereographic(CoordSystem):
-    """
-    A stereographic map projection.
-
-    """
+    """A stereographic map projection."""
 
     grid_mapping_name = "stereographic"
 
-    def __init__(self, central_lat, central_lon,
-                 false_easting=0.0, false_northing=0.0,
-                 true_scale_lat=None, ellipsoid=None):
-        """
-        Constructs a Stereographic coord system.
-
-        Args:
-
-        * central_lat
-            The latitude of the pole.
-
-        * central_lon
-            The central longitude, which aligns with the y axis.
-
-        Kwargs:
-
-        * false_easting
-            X offset from planar origin in metres. Defaults to 0.
-
-        * false_northing
-            Y offset from planar origin in metres. Defaults to 0.
-
-        * true_scale_lat
-            Latitude of true scale.
-
-        * ellipsoid
-            :class:`GeogCS` defining the ellipsoid.
+    def __init__(
+        self,
+        central_lat,
+        central_lon,
+        false_easting=None,
+        false_northing=None,
+        true_scale_lat=None,
+        ellipsoid=None,
+        scale_factor_at_projection_origin=None,
+    ):
+        """Construct a Stereographic coord system.
+
+        Parameters
+        ----------
+        central_lat : float
+            The latitude of the pole.
+        central_lon : float
+            The central longitude, which aligns with the y axis.
+        false_easting : float, optional
+            X offset from planar origin in metres.
+        false_northing : float, optional
+            Y offset from planar origin in metres.
+        true_scale_lat : float, optional
+            Latitude of true scale.
+        ellipsoid : :class:`GeogCS`, optional
+            If given, defines the ellipsoid.
+        scale_factor_at_projection_origin : float, optional
+            Scale factor at the origin of the projection.
+
+        Notes
+        -----
+        It is only valid to provide one of true_scale_lat and
+        scale_factor_at_projection_origin.
 
         """
-
         #: True latitude of planar origin in degrees.
         self.central_lat = float(central_lat)
@@ -782,73 +1011,155 @@ def __init__(self, central_lat, central_lon,
         self.central_lon = float(central_lon)
 
         #: X offset from planar origin in metres.
-        self.false_easting = float(false_easting)
+        self.false_easting = _arg_default(false_easting, 0)
 
         #: Y offset from planar origin in metres.
-        self.false_northing = float(false_northing)
+        self.false_northing = _arg_default(false_northing, 0)
 
         #: Latitude of true scale.
-        self.true_scale_lat = float(true_scale_lat) if true_scale_lat else None
-
-        #: Ellipsoid definition.
+        self.true_scale_lat = _arg_default(true_scale_lat, None, cast_as=_float_or_None)
+        #: Scale factor at projection origin.
+        self.scale_factor_at_projection_origin = _arg_default(
+            scale_factor_at_projection_origin, None, cast_as=_float_or_None
+        )
+        # N.B. the way we use these parameters, we need them to default to None,
+        # and *not* to 0.0.
+
+        if (
+            self.true_scale_lat is not None
+            and self.scale_factor_at_projection_origin is not None
+        ):
+            raise ValueError(
+                "It does not make sense to provide both "
+                '"scale_factor_at_projection_origin" and "true_scale_lat".'
+            )
+
+        #: Ellipsoid definition (:class:`GeogCS` or None).
self.ellipsoid = ellipsoid + def _repr_attributes(self): + if self.scale_factor_at_projection_origin is None: + scale_info = "true_scale_lat={!r}, ".format(self.true_scale_lat) + else: + scale_info = "scale_factor_at_projection_origin={!r}, ".format( + self.scale_factor_at_projection_origin + ) + return ( + f"(central_lat={self.central_lat}, central_lon={self.central_lon}, " + f"false_easting={self.false_easting}, false_northing={self.false_northing}, " + f"{scale_info}" + f"ellipsoid={self.ellipsoid})" + ) + def __repr__(self): - return "Stereographic(central_lat={!r}, central_lon={!r}, "\ - "false_easting={!r}, false_northing={!r}, "\ - "true_scale_lat={!r}, "\ - "ellipsoid={!r})".format(self.central_lat, self.central_lon, - self.false_easting, - self.false_northing, - self.true_scale_lat, - self.ellipsoid) + return "Stereographic" + self._repr_attributes() def as_cartopy_crs(self): globe = self._ellipsoid_to_globe(self.ellipsoid, ccrs.Globe()) return ccrs.Stereographic( - self.central_lat, self.central_lon, - self.false_easting, self.false_northing, - self.true_scale_lat, globe=globe) + self.central_lat, + self.central_lon, + self.false_easting, + self.false_northing, + self.true_scale_lat, + self.scale_factor_at_projection_origin, + globe=globe, + ) def as_cartopy_projection(self): return self.as_cartopy_crs() -class LambertConformal(CoordSystem): - """ - A coordinate system in the Lambert Conformal conic projection. - - """ +class PolarStereographic(Stereographic): + """A subclass of the stereographic map projection centred on a pole.""" + + grid_mapping_name = "polar_stereographic" + + def __init__( + self, + central_lat, + central_lon, + false_easting=None, + false_northing=None, + true_scale_lat=None, + scale_factor_at_projection_origin=None, + ellipsoid=None, + ): + """Construct a Polar Stereographic coord system. + + Parameters + ---------- + central_lat : {90, -90} + The latitude of the pole. + central_lon : float + The central longitude, which aligns with the y axis. + false_easting : float, optional + X offset from planar origin in metres. + false_northing : float, optional + Y offset from planar origin in metres. + true_scale_lat : float, optional + Latitude of true scale. + scale_factor_at_projection_origin : float, optional + Scale factor at the origin of the projection. + ellipsoid : :class:`GeogCS`, optional + If given, defines the ellipsoid. + + Notes + ----- + It is only valid to provide at most one of `true_scale_lat` and + `scale_factor_at_projection_origin`. - grid_mapping_name = "lambert_conformal_conic" - def __init__(self, central_lat=39.0, central_lon=-96.0, - false_easting=0.0, false_northing=0.0, - secant_latitudes=(33, 45), ellipsoid=None): """ - Constructs a LambertConformal coord system. - - Kwargs: - - * central_lat - The latitude of "unitary scale". - - * central_lon - The central longitude. + super().__init__( + central_lat=central_lat, + central_lon=central_lon, + false_easting=false_easting, + false_northing=false_northing, + true_scale_lat=true_scale_lat, + scale_factor_at_projection_origin=scale_factor_at_projection_origin, + ellipsoid=ellipsoid, + ) - * false_easting - X offset from planar origin in metres. + def __repr__(self): + return "PolarStereographic" + self._repr_attributes() - * false_northing - Y offset from planar origin in metres. - * secant_latitudes - Latitudes of secant intersection. 
+class LambertConformal(CoordSystem):
+    """A coordinate system in the Lambert Conformal conic projection."""

-    * ellipsoid
-        :class:`GeogCS` defining the ellipsoid.
+    grid_mapping_name = "lambert_conformal_conic"

+    def __init__(
+        self,
+        central_lat=None,
+        central_lon=None,
+        false_easting=None,
+        false_northing=None,
+        secant_latitudes=None,
+        ellipsoid=None,
+    ):
+        """Construct a LambertConformal coord system.
+
+        Parameters
+        ----------
+        central_lat : optional
+            The latitude of "unitary scale". Defaults to 39.0.
+        central_lon : optional
+            The central longitude. Defaults to -96.0.
+        false_easting : optional
+            X offset from planar origin in metres. Defaults to 0.0.
+        false_northing : optional
+            Y offset from planar origin in metres. Defaults to 0.0.
+        secant_latitudes : number or iterable of 1 or 2 numbers, optional
+            Latitudes of secant intersection. One or two.
+            Defaults to (33.0, 45.0).
+        ellipsoid : :class:`GeogCS`, optional
+            If given, defines the ellipsoid.
+
+        Notes
+        -----
     .. note::

         Default arguments are for the familiar USA map:
@@ -857,34 +1168,39 @@ def __init__(self, central_lat=39.0, central_lon=-96.0,
             secant_latitudes=(33, 45)

        """
-        #: True latitude of planar origin in degrees.
-        self.central_lat = central_lat
+        self.central_lat = _arg_default(central_lat, 39.0)
+
        #: True longitude of planar origin in degrees.
-        self.central_lon = central_lon
+        self.central_lon = _arg_default(central_lon, -96.0)
+
        #: X offset from planar origin in metres.
-        self.false_easting = false_easting
+        self.false_easting = _arg_default(false_easting, 0)
+
        #: Y offset from planar origin in metres.
-        self.false_northing = false_northing
-        #: The one or two standard parallels of the cone.
-        try:
-            self.secant_latitudes = tuple(secant_latitudes)
-        except TypeError:
-            self.secant_latitudes = (secant_latitudes,)
-        nlats = len(self.secant_latitudes)
-        if nlats == 0 or nlats > 2:
-            emsg = 'Either one or two secant latitudes required, got {}'
-            raise ValueError(emsg.format(nlats))
-        #: Ellipsoid definition.
+        self.false_northing = _arg_default(false_northing, 0)
+
+        #: The standard parallels of the cone (tuple of 1 or 2 floats).
+        self.secant_latitudes = _arg_default(
+            secant_latitudes, (33, 45), cast_as=_1or2_parallels
+        )
+
+        #: Ellipsoid definition (:class:`GeogCS` or None).
        self.ellipsoid = ellipsoid

    def __repr__(self):
-        return "LambertConformal(central_lat={!r}, central_lon={!r}, "\
-               "false_easting={!r}, false_northing={!r}, "\
-               "secant_latitudes={!r}, ellipsoid={!r})".format(
-                   self.central_lat, self.central_lon,
-                   self.false_easting, self.false_northing,
-                   self.secant_latitudes, self.ellipsoid)
+        return (
+            "LambertConformal(central_lat={!r}, central_lon={!r}, "
+            "false_easting={!r}, false_northing={!r}, "
+            "secant_latitudes={!r}, ellipsoid={!r})".format(
+                self.central_lat,
+                self.central_lon,
+                self.false_easting,
+                self.false_northing,
+                self.secant_latitudes,
+                self.ellipsoid,
+            )
+        )

    def as_cartopy_crs(self):
        # We're either north or south polar. Set a cutoff accordingly.
@@ -899,57 +1215,104 @@ def as_cartopy_crs(self):

        globe = self._ellipsoid_to_globe(self.ellipsoid, ccrs.Globe())

-        # Cartopy v0.12 deprecated the use of secant_latitudes.
-        if cartopy.__version__ < '0.12':
-            conic_position = dict(secant_latitudes=self.secant_latitudes)
-        else:
-            conic_position = dict(standard_parallels=self.secant_latitudes)
-
        return ccrs.LambertConformal(
            central_longitude=self.central_lon,
            central_latitude=self.central_lat,
            false_easting=self.false_easting,
            false_northing=self.false_northing,
-            globe=globe, cutoff=cutoff, **conic_position)
+            globe=globe,
+            cutoff=cutoff,
+            standard_parallels=self.secant_latitudes,
+        )

    def as_cartopy_projection(self):
        return self.as_cartopy_crs()


 class Mercator(CoordSystem):
-    """
-    A coordinate system in the Mercator projection.
-
-    """
+    """A coordinate system in the Mercator projection."""

    grid_mapping_name = "mercator"

-    def __init__(self, longitude_of_projection_origin=0.0, ellipsoid=None,
-                 standard_parallel=0.0):
-        """
-        Constructs a Mercator coord system.
-
-        Kwargs:
-            * longitude_of_projection_origin
-                True longitude of planar origin in degrees.
-            * ellipsoid
-                :class:`GeogCS` defining the ellipsoid.
-            * standard_parallel
-                the latitude where the scale is 1. Defaults to 0 degrees.
+    def __init__(
+        self,
+        longitude_of_projection_origin=None,
+        ellipsoid=None,
+        standard_parallel=None,
+        scale_factor_at_projection_origin=None,
+        false_easting=None,
+        false_northing=None,
+    ):
+        """Construct a Mercator coord system.
+
+        Parameters
+        ----------
+        longitude_of_projection_origin : optional
+            True longitude of planar origin in degrees. Defaults to 0.0.
+        ellipsoid : :class:`GeogCS`, optional
+            If given, defines the ellipsoid.
+        standard_parallel : optional
+            The latitude where the scale is 1. Defaults to 0.0.
+        scale_factor_at_projection_origin : optional
+            Scale factor at natural origin. Unused by default.
+        false_easting : optional
+            X offset from the planar origin in metres. Defaults to 0.0.
+        false_northing : optional
+            Y offset from the planar origin in metres. Defaults to 0.0.
+        datum : optional
+            If given, specifies the datum of the coordinate system. Only
+            respected if iris.FUTURE.datum_support is set.
+
+        Notes
+        -----
+        Only one of ``standard_parallel`` and
+        ``scale_factor_at_projection_origin`` should be included.

        """
        #: True longitude of planar origin in degrees.
-        self.longitude_of_projection_origin = longitude_of_projection_origin
-        #: Ellipsoid definition.
+        self.longitude_of_projection_origin = _arg_default(
+            longitude_of_projection_origin, 0
+        )
+
+        #: Ellipsoid definition (:class:`GeogCS` or None).
        self.ellipsoid = ellipsoid
-        #: The latitude where the scale is 1 (defaults to 0 degrees).
-        self.standard_parallel = standard_parallel
+
+        # Initialise to None, then set based on arguments
+        #: The latitude where the scale is 1.
+        self.standard_parallel = None
+        # The scale factor at the origin of the projection
+        self.scale_factor_at_projection_origin = None
+        if scale_factor_at_projection_origin is None:
+            self.standard_parallel = _arg_default(standard_parallel, 0)
+        else:
+            if standard_parallel is None:
+                self.scale_factor_at_projection_origin = _arg_default(
+                    scale_factor_at_projection_origin, 0
+                )
+            else:
+                raise ValueError(
+                    "It does not make sense to provide both "
+                    '"scale_factor_at_projection_origin" and '
+                    '"standard_parallel".'
+                )
+
+        #: X offset from the planar origin in metres.
+        self.false_easting = _arg_default(false_easting, 0)
+
+        #: Y offset from the planar origin in metres.
+ self.false_northing = _arg_default(false_northing, 0) def __repr__(self): - res = ("Mercator(longitude_of_projection_origin=" - "{self.longitude_of_projection_origin!r}, " - "ellipsoid={self.ellipsoid!r}, " - "standard_parallel={self.standard_parallel!r})") + res = ( + "Mercator(longitude_of_projection_origin=" + "{self.longitude_of_projection_origin!r}, " + "ellipsoid={self.ellipsoid!r}, " + "standard_parallel={self.standard_parallel!r}, " + "scale_factor_at_projection_origin=" + "{self.scale_factor_at_projection_origin!r}, " + "false_easting={self.false_easting!r}, " + "false_northing={self.false_northing!r})" + ) return res.format(self=self) def as_cartopy_crs(self): @@ -958,65 +1321,76 @@ def as_cartopy_crs(self): return ccrs.Mercator( central_longitude=self.longitude_of_projection_origin, globe=globe, - latitude_true_scale=self.standard_parallel) + latitude_true_scale=self.standard_parallel, + scale_factor=self.scale_factor_at_projection_origin, + false_easting=self.false_easting, + false_northing=self.false_northing, + ) def as_cartopy_projection(self): return self.as_cartopy_crs() class LambertAzimuthalEqualArea(CoordSystem): - """ - A coordinate system in the Lambert Azimuthal Equal Area projection. - - """ + """A coordinate system in the Lambert Azimuthal Equal Area projection.""" grid_mapping_name = "lambert_azimuthal_equal_area" - def __init__(self, latitude_of_projection_origin=0.0, - longitude_of_projection_origin=0.0, - false_easting=0.0, false_northing=0.0, - ellipsoid=None): - """ - Constructs a Lambert Azimuthal Equal Area coord system. - - Kwargs: - - * latitude_of_projection_origin - True latitude of planar origin in degrees. Defaults to 0. - - * longitude_of_projection_origin - True longitude of planar origin in degrees. Defaults to 0. - - * false_easting - X offset from planar origin in metres. Defaults to 0. - - * false_northing - Y offset from planar origin in metres. Defaults to 0. - - * ellipsoid - :class:`GeogCS` defining the ellipsoid. + def __init__( + self, + latitude_of_projection_origin=None, + longitude_of_projection_origin=None, + false_easting=None, + false_northing=None, + ellipsoid=None, + ): + """Construct a Lambert Azimuthal Equal Area coord system. + + Parameters + ---------- + latitude_of_projection_origin : optional + True latitude of planar origin in degrees. Defaults to 0.0. + longitude_of_projection_origin : optional + True longitude of planar origin in degrees. Defaults to 0.0. + false_easting : optional + X offset from planar origin in metres. Defaults to 0.0. + false_northing : optional + Y offset from planar origin in metres. Defaults to 0.0. + ellipsoid : :class:`GeogCS`, optional + If given, defines the ellipsoid. """ #: True latitude of planar origin in degrees. - self.latitude_of_projection_origin = latitude_of_projection_origin + self.latitude_of_projection_origin = _arg_default( + latitude_of_projection_origin, 0 + ) + #: True longitude of planar origin in degrees. - self.longitude_of_projection_origin = longitude_of_projection_origin + self.longitude_of_projection_origin = _arg_default( + longitude_of_projection_origin, 0 + ) + #: X offset from planar origin in metres. - self.false_easting = false_easting + self.false_easting = _arg_default(false_easting, 0) + #: Y offset from planar origin in metres. - self.false_northing = false_northing - #: Ellipsoid definition. + self.false_northing = _arg_default(false_northing, 0) + + #: Ellipsoid definition (:class:`GeogCS` or None). 
        self.ellipsoid = ellipsoid

    def __repr__(self):
-        return ("LambertAzimuthalEqualArea(latitude_of_projection_origin={!r},"
-                " longitude_of_projection_origin={!r}, false_easting={!r},"
-                " false_northing={!r}, ellipsoid={!r})").format(
-                    self.latitude_of_projection_origin,
-                    self.longitude_of_projection_origin,
-                    self.false_easting,
-                    self.false_northing,
-                    self.ellipsoid)
+        return (
+            "LambertAzimuthalEqualArea(latitude_of_projection_origin={!r},"
+            " longitude_of_projection_origin={!r}, false_easting={!r},"
+            " false_northing={!r}, ellipsoid={!r})"
+        ).format(
+            self.latitude_of_projection_origin,
+            self.longitude_of_projection_origin,
+            self.false_easting,
+            self.false_northing,
+            self.ellipsoid,
+        )

    def as_cartopy_crs(self):
        globe = self._ellipsoid_to_globe(self.ellipsoid, ccrs.Globe())
@@ -1026,86 +1400,286 @@ def as_cartopy_crs(self):
            central_latitude=self.latitude_of_projection_origin,
            false_easting=self.false_easting,
            false_northing=self.false_northing,
-            globe=globe)
+            globe=globe,
+        )

    def as_cartopy_projection(self):
        return self.as_cartopy_crs()


 class AlbersEqualArea(CoordSystem):
-    """
-    A coordinate system in the Albers Conical Equal Area projection.
-
-    """
+    """A coordinate system in the Albers Conical Equal Area projection."""

    grid_mapping_name = "albers_conical_equal_area"

-    def __init__(self, latitude_of_projection_origin=0.0,
-                 longitude_of_central_meridian=0.0,
-                 false_easting=0.0, false_northing=0.0,
-                 standard_parallels=(20.0, 50.0),
-                 ellipsoid=None):
+    def __init__(
+        self,
+        latitude_of_projection_origin=None,
+        longitude_of_central_meridian=None,
+        false_easting=None,
+        false_northing=None,
+        standard_parallels=None,
+        ellipsoid=None,
+    ):
+        """Construct an Albers Conical Equal Area coord system.
+
+        Parameters
+        ----------
+        latitude_of_projection_origin : optional
+            True latitude of planar origin in degrees. Defaults to 0.0.
+        longitude_of_central_meridian : optional
+            True longitude of planar central meridian in degrees.
+            Defaults to 0.0.
+        false_easting : optional
+            X offset from planar origin in metres. Defaults to 0.0.
+        false_northing : optional
+            Y offset from planar origin in metres. Defaults to 0.0.
+        standard_parallels : number or iterable of 1 or 2 numbers, optional
+            The one or two latitudes of correct scale.
+            Defaults to (20.0, 50.0).
+        ellipsoid : :class:`GeogCS`, optional
+            If given, defines the ellipsoid.
+
        """
-        Constructs a Albers Conical Equal Area coord system.
+        #: True latitude of planar origin in degrees.
+        self.latitude_of_projection_origin = _arg_default(
+            latitude_of_projection_origin, 0
+        )
+
+        #: True longitude of planar central meridian in degrees.
+        self.longitude_of_central_meridian = _arg_default(
+            longitude_of_central_meridian, 0
+        )
+
+        #: X offset from planar origin in metres.
+        self.false_easting = _arg_default(false_easting, 0)
+
+        #: Y offset from planar origin in metres.
+        self.false_northing = _arg_default(false_northing, 0)
+
+        #: The one or two latitudes of correct scale (tuple of 1 or 2 floats).
+        self.standard_parallels = _arg_default(
+            standard_parallels, (20, 50), cast_as=_1or2_parallels
+        )
+
+        #: Ellipsoid definition (:class:`GeogCS` or None).
+        self.ellipsoid = ellipsoid
+
+    def __repr__(self):
+        return (
+            "AlbersEqualArea(latitude_of_projection_origin={!r},"
+            " longitude_of_central_meridian={!r}, false_easting={!r},"
+            " false_northing={!r}, standard_parallels={!r},"
+            " ellipsoid={!r})"
+        ).format(
+            self.latitude_of_projection_origin,
+            self.longitude_of_central_meridian,
+            self.false_easting,
+            self.false_northing,
+            self.standard_parallels,
+            self.ellipsoid,
+        )
+
+    def as_cartopy_crs(self):
+        globe = self._ellipsoid_to_globe(self.ellipsoid, ccrs.Globe())
+
+        return ccrs.AlbersEqualArea(
+            central_longitude=self.longitude_of_central_meridian,
+            central_latitude=self.latitude_of_projection_origin,
+            false_easting=self.false_easting,
+            false_northing=self.false_northing,
+            standard_parallels=self.standard_parallels,
+            globe=globe,
+        )

-        Kwargs:
+    def as_cartopy_projection(self):
+        return self.as_cartopy_crs()

-        * latitude_of_projection_origin
-                True latitude of planar origin in degrees.
-                Defaults to 0.

-        * longitude_of_central_meridian
-                True longitude of planar central meridian in degrees.
-                Defaults to 0.
+class ObliqueMercator(CoordSystem):
+    """A cylindrical map projection, with XY coordinates measured in metres.

-        * false_easting
-                X offset from planar origin in metres. Defaults to 0.
+    Designed for regions not well suited to :class:`Mercator` or
+    :class:`TransverseMercator`, as the positioning of the cylinder is more
+    customisable.

-        * false_northing
-                Y offset from planar origin in metres. Defaults to 0.
+    See Also
+    --------
+    RotatedMercator :
+        :class:`ObliqueMercator` with ``azimuth_of_central_line=90``.
+
+    """

-        * standard_parallels
-                The one or two latitudes of correct scale.
-                Defaults to (20,50).

-        * ellipsoid
-            :class:`GeogCS` defining the ellipsoid.
+    grid_mapping_name = "oblique_mercator"
+
+    def __init__(
+        self,
+        azimuth_of_central_line,
+        latitude_of_projection_origin,
+        longitude_of_projection_origin,
+        false_easting=None,
+        false_northing=None,
+        scale_factor_at_projection_origin=None,
+        ellipsoid=None,
+    ):
+        """Construct an ObliqueMercator object.
+
+        Parameters
+        ----------
+        azimuth_of_central_line : float
+            Azimuth of the centre line, clockwise from north, at its centre
+            point.
+        latitude_of_projection_origin : float
+            The true latitude of the planar origin in degrees.
+        longitude_of_projection_origin : float
+            The true longitude of the central meridian in degrees.
+        false_easting : float, optional
+            X offset from the planar origin in metres.
+            Defaults to 0.0.
+        false_northing : float, optional
+            Y offset from the planar origin in metres.
+            Defaults to 0.0.
+        scale_factor_at_projection_origin : float, optional
+            Scale factor at the central meridian.
+            Defaults to 1.0.
+        ellipsoid : :class:`GeogCS`, optional
+            If given, defines the ellipsoid.
+
+        Examples
+        --------
+        >>> from iris.coord_systems import GeogCS, ObliqueMercator
+        >>> my_ellipsoid = GeogCS(6371229.0, None, 0.0)
+        >>> ObliqueMercator(90.0, -22.0, -59.0, -25000.0, -25000.0, 1., my_ellipsoid)
+        ObliqueMercator(azimuth_of_central_line=90.0, latitude_of_projection_origin=-22.0, longitude_of_projection_origin=-59.0, false_easting=-25000.0, false_northing=-25000.0, scale_factor_at_projection_origin=1.0, ellipsoid=GeogCS(6371229.0))

        """
+        #: Azimuth of the centre line, clockwise from north.
+        self.azimuth_of_central_line = float(azimuth_of_central_line)
+
        #: True latitude of planar origin in degrees.
-        self.latitude_of_projection_origin = latitude_of_projection_origin
-        #: True longitude of planar central meridian in degrees.
-        self.longitude_of_central_meridian = longitude_of_central_meridian
+        self.latitude_of_projection_origin = float(latitude_of_projection_origin)
+
+        #: True longitude of planar origin in degrees.
+        self.longitude_of_projection_origin = float(longitude_of_projection_origin)
+
        #: X offset from planar origin in metres.
-        self.false_easting = false_easting
+        self.false_easting = _arg_default(false_easting, 0)
+
        #: Y offset from planar origin in metres.
-        self.false_northing = false_northing
-        #: The one or two latitudes of correct scale.
-        self.standard_parallels = standard_parallels
-        #: Ellipsoid definition.
+        self.false_northing = _arg_default(false_northing, 0)
+
+        #: Scale factor at the central meridian.
+        self.scale_factor_at_projection_origin = _arg_default(
+            scale_factor_at_projection_origin, 1.0
+        )
+
+        #: Ellipsoid definition (:class:`GeogCS` or None).
        self.ellipsoid = ellipsoid

    def __repr__(self):
-        return ("AlbersEqualArea(latitude_of_projection_origin={!r},"
-                " longitude_of_central_meridian={!r}, false_easting={!r},"
-                " false_northing={!r}, standard_parallels={!r},"
-                " ellipsoid={!r})").format(
-                    self.latitude_of_projection_origin,
-                    self.longitude_of_central_meridian,
-                    self.false_easting,
-                    self.false_northing,
-                    self.standard_parallels,
-                    self.ellipsoid)
+        return (
+            "{!s}(azimuth_of_central_line={!r}, "
+            "latitude_of_projection_origin={!r}, "
+            "longitude_of_projection_origin={!r}, false_easting={!r}, "
+            "false_northing={!r}, scale_factor_at_projection_origin={!r}, "
+            "ellipsoid={!r})".format(
+                self.__class__.__name__,
+                self.azimuth_of_central_line,
+                self.latitude_of_projection_origin,
+                self.longitude_of_projection_origin,
+                self.false_easting,
+                self.false_northing,
+                self.scale_factor_at_projection_origin,
+                self.ellipsoid,
+            )
+        )

    def as_cartopy_crs(self):
-        globe = self._ellipsoid_to_globe(self.ellipsoid, ccrs.Globe())
+        globe = self._ellipsoid_to_globe(self.ellipsoid, None)

-        return ccrs.AlbersEqualArea(
-            central_longitude=self.longitude_of_central_meridian,
+        return ccrs.ObliqueMercator(
+            central_longitude=self.longitude_of_projection_origin,
            central_latitude=self.latitude_of_projection_origin,
            false_easting=self.false_easting,
            false_northing=self.false_northing,
-            standard_parallels=self.standard_parallels,
-            globe=globe)
+            scale_factor=self.scale_factor_at_projection_origin,
+            azimuth=self.azimuth_of_central_line,
+            globe=globe,
+        )

    def as_cartopy_projection(self):
        return self.as_cartopy_crs()
+
+
+class RotatedMercator(ObliqueMercator):
+    """:class:`ObliqueMercator` with ``azimuth_of_central_line=90``.
+
+    As noted in CF versions 1.10 and earlier:
+
+        The Rotated Mercator projection is an Oblique Mercator projection
+        with azimuth = +90.
+
+    Notes
+    -----
+    .. deprecated:: 3.8.0
+        This coordinate system was deprecated on introduction, and is already
+        scheduled for removal in a future release, since CF version 1.11
+        onwards requires use of :class:`ObliqueMercator` with
+        ``azimuth_of_central_line=90``.
+        Any :class:`RotatedMercator` instances will always be saved to NetCDF
+        as the ``oblique_mercator`` grid mapping.
+
+    """
+
+    def __init__(
+        self,
+        latitude_of_projection_origin,
+        longitude_of_projection_origin,
+        false_easting=None,
+        false_northing=None,
+        scale_factor_at_projection_origin=None,
+        ellipsoid=None,
+    ):
+        """Construct a RotatedMercator object.
+
+        Parameters
+        ----------
+        latitude_of_projection_origin : float
+            The true latitude of the planar origin in degrees.
+        longitude_of_projection_origin : float
+            The true longitude of the central meridian in degrees.
+        false_easting : float, optional
+            X offset from the planar origin in metres.
+            Defaults to 0.0.
+        false_northing : float, optional
+            Y offset from the planar origin in metres.
+            Defaults to 0.0.
+        scale_factor_at_projection_origin : float, optional
+            Scale factor at the central meridian.
+            Defaults to 1.0.
+        ellipsoid : :class:`GeogCS`, optional
+            If given, defines the ellipsoid.
+
+        """
+        message = (
+            "iris.coord_systems.RotatedMercator is deprecated, and will be "
+            "removed in a future release. Instead please use "
+            "iris.coord_systems.ObliqueMercator with "
+            "azimuth_of_central_line=90."
+        )
+        warn_deprecated(message)
+
+        super().__init__(
+            90.0,
+            latitude_of_projection_origin,
+            longitude_of_projection_origin,
+            false_easting,
+            false_northing,
+            scale_factor_at_projection_origin,
+            ellipsoid,
+        )
+
+    def __repr__(self):
+        # Remove the azimuth argument from the parent repr.
+        result = super().__repr__()
+        result = re.sub(r"azimuth_of_central_line=\d*\.?\d*, ", "", result)
+        return result
diff --git a/lib/iris/coords.py b/lib/iris/coords.py
index 74d2507811..8afe9dad41 100644
--- a/lib/iris/coords.py
+++ b/lib/iris/coords.py
@@ -1,135 +1,1143 @@
-# (C) British Crown Copyright 2010 - 2019, Met Office
+# Copyright Iris contributors
 #
-# This file is part of Iris.
-#
-# Iris is free software: you can redistribute it and/or modify it under
-# the terms of the GNU Lesser General Public License as published by the
-# Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Iris is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with Iris. If not, see <http://www.gnu.org/licenses/>.
-"""
-Definitions of coordinates.
-
-"""
+# This file is part of Iris and is released under the BSD license.
+# See LICENSE in the root of the repository for full licensing details.
+"""Definitions of coordinates and other dimensional metadata.""" -from __future__ import (absolute_import, division, print_function) -from six.moves import (filter, input, map, range, zip) # noqa -import six - -from abc import ABCMeta, abstractproperty +from abc import ABCMeta, abstractmethod from collections import namedtuple -try: # Python 3 - from collections.abc import Iterator -except ImportError: # Python 2.7 - from collections import Iterator +from collections.abc import Container import copy -from itertools import chain -from six.moves import zip_longest +from functools import lru_cache +from itertools import zip_longest import operator import warnings import zlib -import cftime +import dask.array as da import numpy as np import numpy.ma as ma from iris._data_manager import DataManager -from iris._deprecation import warn_deprecated import iris._lazy_data as _lazy -import iris.aux_factory +from iris.common import ( + AncillaryVariableMetadata, + BaseMetadata, + CellMeasureMetadata, + CFVariableMixin, + CoordMetadata, + DimCoordMetadata, + metadata_manager_factory, +) import iris.exceptions import iris.time import iris.util +import iris.warnings -from iris._cube_coord_common import CFVariableMixin -from iris.util import points_step +#: The default value for ignore_axis which controls guess_coord_axis' behaviour +DEFAULT_IGNORE_AXIS = False -class CoordDefn(namedtuple('CoordDefn', - ['standard_name', 'long_name', - 'var_name', 'units', - 'attributes', 'coord_system', - 'climatological'])): - """ - Criterion for identifying a specific type of :class:`DimCoord` or - :class:`AuxCoord` based on its metadata. +class _DimensionalMetadata(CFVariableMixin, metaclass=ABCMeta): + """Superclass for dimensional metadata.""" - """ + _MODE_ADD = 1 + _MODE_SUB = 2 + _MODE_MUL = 3 + _MODE_DIV = 4 + _MODE_RDIV = 5 + _MODE_SYMBOL = { + _MODE_ADD: "+", + _MODE_SUB: "-", + _MODE_MUL: "*", + _MODE_DIV: "/", + _MODE_RDIV: "/", + } + + # Used by printout methods : __str__ and __repr__ + # Overridden in subclasses : Coord->'points', Connectivity->'indices' + _values_array_name = "data" + + @abstractmethod + def __init__( + self, + values, + standard_name=None, + long_name=None, + var_name=None, + units=None, + attributes=None, + ): + """Construct a single dimensional metadata object. + + Parameters + ---------- + values : + The values of the dimensional metadata. + standard_name : optional + CF standard name of the dimensional metadata. + long_name : optional + Descriptive name of the dimensional metadata. + var_name : optional + The netCDF variable name for the dimensional metadata. + units : optional + The :class:`~cf_units.Unit` of the dimensional metadata's values. + Can be a string, which will be converted to a Unit object. + attributes : optional + A dictionary containing other cf and user-defined attributes. - __slots__ = () + """ + # Note: this class includes bounds handling code for convenience, but + # this can only run within instances which are also Coords, because + # only they may actually have bounds. This parent class has no + # bounds-related getter/setter properties, and no bounds keywords in + # its __init__ or __copy__ methods. The only bounds-related behaviour + # it provides is a 'has_bounds()' method, which always returns False. + + # Configure the metadata manager. + if not hasattr(self, "_metadata_manager"): + self._metadata_manager = metadata_manager_factory(BaseMetadata) + + #: CF standard name of the quantity that the metadata represents. 
+ self.standard_name = standard_name + + #: Descriptive name of the metadata. + self.long_name = long_name + + #: The netCDF variable name for the metadata. + self.var_name = var_name + + #: Unit of the quantity that the metadata represents. + self.units = units + + #: Other attributes, including user specified attributes that + #: have no meaning to Iris. + self.attributes = attributes + + # Set up DataManager attributes and values. + self._values_dm = None + self._values = values + self._bounds_dm = None # Only ever set on Coord-derived instances. + + def __getitem__(self, keys): + """Return a new dimensional metadata whose values are obtained by conventional array indexing. + + .. note:: + + Indexing of a circular coordinate results in a non-circular + coordinate if the overall shape of the coordinate changes after + indexing. - def name(self, default='unknown'): """ - Returns a human-readable name. + # Note: this method includes bounds handling code, but it only runs + # within Coord type instances, as only these allow bounds to be set. + + # Fetch the values. + values = self._values_dm.core_data() + + # Index values with the keys. + _, values = iris.util._slice_data_with_keys(values, keys) + + # Copy values after indexing to avoid making metadata that is a + # view on another metadata. This will not realise lazy data. + values = values.copy() + + # If the metadata is a coordinate and it has bounds, repeat the above + # with the bounds. + copy_args = {} + if self.has_bounds(): + bounds = self._bounds_dm.core_data() + _, bounds = iris.util._slice_data_with_keys(bounds, keys) + # Pass into the copy method : for Coords, it has a 'bounds' key. + copy_args["bounds"] = bounds.copy() + + # The new metadata is a copy of the old one with replaced content. + new_metadata = self.copy(values, **copy_args) + + return new_metadata - First it tries self.standard_name, then it tries the 'long_name' - attribute, then the 'var_name' attribute, before falling back to - the value of `default` (which itself defaults to 'unknown'). + def copy(self, values=None): + """Return a copy of this dimensional metadata object. + + Parameters + ---------- + values : optional + An array of values for the new dimensional metadata object. + This may be a different shape to the original values array being + copied. """ - return self.standard_name or self.long_name or self.var_name or default + # Note: this is overridden in Coord subclasses, to add bounds handling + # and a 'bounds' keyword. + new_metadata = copy.deepcopy(self) + if values is not None: + new_metadata._values_dm = None + new_metadata._values = values - def __lt__(self, other): - if not isinstance(other, CoordDefn): - return NotImplemented + return new_metadata - def _sort_key(defn): - # Emulate Python 2 behaviour with None - return (defn.standard_name is not None, defn.standard_name, - defn.long_name is not None, defn.long_name, - defn.var_name is not None, defn.var_name, - defn.units is not None, defn.units, - defn.coord_system is not None, defn.coord_system) + @abstractmethod + def cube_dims(self, cube): + """Identify the cube dims of any _DimensionalMetadata object. - return _sort_key(self) < _sort_key(other) + Return the dimensions in the cube of a matching _DimensionalMetadata + object, if any. + Equivalent to cube.coord_dims(self) for a Coord, + or cube.cell_measure_dims for a CellMeasure, and so on. + Simplifies generic code to handle any _DimensionalMetadata objects. 
-class CoordExtent(namedtuple('_CoordExtent', ['name_or_coord',
-                                              'minimum',
-                                              'maximum',
-                                              'min_inclusive',
-                                              'max_inclusive'])):
-    """Defines a range of values for a coordinate."""
+        """
+        # Only makes sense for specific subclasses.
+        raise NotImplementedError()
+
+    def _sanitise_array(self, src, ndmin):
+        if _lazy.is_lazy_data(src):
+            # Lazy data : just ensure ndmin requirement.
+            ndims_missing = ndmin - src.ndim
+            if ndims_missing <= 0:
+                result = src
+            else:
+                extended_shape = tuple([1] * ndims_missing + list(src.shape))
+                result = src.reshape(extended_shape)
+        else:
+            # Real data : a few more things to do in this case.
+            # Ensure the array is writeable.
+            # NB. Returns the *same object* if src is already writeable.
+            result = np.require(src, requirements="W")
+            # Ensure the array has enough dimensions.
+            # NB. Returns the *same object* if result.ndim >= ndmin
+            func = ma.array if ma.isMaskedArray(result) else np.array
+            result = func(result, ndmin=ndmin, copy=False)
+            # We don't need to copy the data, but we do need to have our
+            # own view so we can control the shape, etc.
+            result = result.view()
+        return result
+
+    @property
+    def _values(self):
+        """The _DimensionalMetadata values as a NumPy array."""
+        return self._values_dm.data.view()
+
+    @_values.setter
+    def _values(self, values):
+        # Set the values to a new array - as long as it's the same shape.
+
+        # Ensure values has an ndmin of 1 and is either a numpy or lazy array.
+        # This will avoid Scalar _DimensionalMetadata with values of shape ()
+        # rather than the desired (1,).
+        values = self._sanitise_array(values, 1)
+
+        # Set or update DataManager.
+        if self._values_dm is None:
+            self._values_dm = DataManager(values)
+        else:
+            self._values_dm.data = values
+
+    def _lazy_values(self):
+        """Return a lazy array representing the dimensional metadata values."""
+        return self._values_dm.lazy_data()
+
+    def _core_values(self):
+        """Value array of this dimensional metadata which may be a NumPy array or a dask array."""
+        result = self._values_dm.core_data()
+        if not _lazy.is_lazy_data(result):
+            result = result.view()
+
+        return result
+
+    def _has_lazy_values(self):
+        """Indicate whether the metadata's values array is a lazy dask array or not."""
+        return self._values_dm.has_lazy_data()
+
+    def summary(
+        self,
+        shorten=False,
+        max_values=None,
+        edgeitems=2,
+        linewidth=None,
+        precision=None,
+        convert_dates=True,
+        _section_indices=None,
+    ):
+        r"""Make a printable text summary.
+
+        Parameters
+        ----------
+        shorten : bool, default=False
+            If True, produce an abbreviated one-line summary.
+            If False, produce a multi-line summary, with embedded newlines.
+        max_values : int or None
+            If more than this many data values, print truncated data arrays
+            instead of full contents.
+            If 0, print only the shape.
+            The default is 5 if :attr:`shorten`\ =True, or 15 otherwise.
+            This overrides ``numpy.get_printoptions()['threshold']``\ .
+        linewidth : int or None
+            Character-width controlling line splitting of array outputs.
+            If unset, defaults to ``numpy.get_printoptions()['linewidth']``\ .
+        edgeitems : int, default=2
+            Controls truncated array output.
+            Overrides ``numpy.get_printoptions()['edgeitems']``\ .
+        precision : int or None
+            Controls decimal formatting of numbers.
+            When :attr:`shorten`\ =True this defaults to 3, in which case it
+            overrides ``numpy.get_printoptions()['precision']``\ .
+        convert_dates : bool, default=True
+            If the units have a calendar, then print array values as date
+            strings instead of the actual numbers.
+ + Returns + ------- + str + Output text, with embedded newlines when :attr:`shorten`\ =False. + + Notes + ----- + .. note:: + Arrays are formatted using :meth:`numpy.array2string`. Some aspects + of the array formatting are controllable in the usual way, via + :meth:`numpy.printoptions`, but others are overridden as detailed + above. + Control of those aspects is still available, but only via the call + arguments. + + """ + # NOTE: the *private* key "_section_indices" can be set to a dict, to + # return details of which (line, character) each particular section of + # the output text begins at. + # Currently only used by MeshCoord.summary(), which needs this info to + # modify the result string, for idiosyncratic reasons. + + def array_summary(data, n_max, n_edge, linewidth, precision): + # Return a text summary of an array. + # Take account of strings, dates and masked data. + result = "" + formatter = None + if convert_dates and self.units.is_time_reference(): + # Account for dates, if enabled. + # N.B. a time unit with a long time interval ("months" + # or "years") cannot be converted to a date using + # `num2date`, so gracefully fall back to printing + # values as numbers. + if not self.units.is_long_time_interval(): + # Otherwise ... replace all with strings. + if ma.is_masked(data): + mask = data.mask + else: + mask = None + data = np.array(self.units.num2date(data)) + data = data.astype(str) + # Masked datapoints do not survive num2date. + if mask is not None: + data = np.ma.masked_array(data, mask) + + if ma.is_masked(data): + # Masks are not handled by np.array2string, whereas + # MaskedArray.__str__ is using a private method to convert to + # objects. + # Our preferred solution is to convert to strings *and* fill + # with '--'. This is not ideal because numbers will not align + # with a common numeric format, but there is no *public* logic + # in numpy to arrange that, so let's not overcomplicate. + # It happens that array2string *also* does not use a common + # format (width) for strings, but we fix that below... + data = data.astype(str).filled("--") + + if data.dtype.kind == "U": + # Strings : N.B. includes all missing data + # find the longest. + length = max(len(str(x)) for x in data.flatten()) + # Pre-apply a common formatting width. + formatter = {"all": lambda x: str(x).ljust(length)} + + result = np.array2string( + data, + separator=", ", + edgeitems=n_edge, + threshold=n_max, + max_line_width=linewidth, + formatter=formatter, + precision=precision, + ) + + return result + + units_str = str(self.units) + if self.units.calendar and not shorten: + units_str += f", {self.units.calendar} calendar" + title_str = f"{self.name()} / ({units_str})" + cls_str = type(self).__name__ + shape_str = str(self.shape) + + # Implement conditional defaults for control args. + if max_values is None: + max_values = 5 if shorten else 15 + precision = 3 if shorten else None + n_indent = 4 + indent = " " * n_indent + newline_indent = "\n" + indent + if linewidth is not None: + given_array_width = linewidth + else: + given_array_width = np.get_printoptions()["linewidth"] + using_array_width = given_array_width - n_indent * 2 + # Make a printout of the main data array (or maybe not, if lazy). 
+        if self._has_lazy_values():
+            data_str = "<lazy>"
+        elif max_values == 0:
+            data_str = "[...]"
+        else:
+            data_str = array_summary(
+                self._values,
+                n_max=max_values,
+                n_edge=edgeitems,
+                linewidth=using_array_width,
+                precision=precision,
+            )
+
+        # The output under construction, divided into lines for convenience.
+        output_lines = [""]
+
+        def add_output(text, section=None):
+            # Append output text and record locations of named 'sections'
+            if section and _section_indices is not None:
+                # defined a named 'section', recording the current line number
+                # and character position as its start position
+                i_line = len(output_lines) - 1
+                i_char = len(output_lines[-1])
+                _section_indices[section] = (i_line, i_char)
+            # Split the text-to-add into lines
+            lines = text.split("\n")
+            # Add initial text (before first '\n') to the current line
+            output_lines[-1] += lines[0]
+            # Add subsequent lines as additional output lines
+            for line in lines[1:]:
+                output_lines.append(line)  # Add new lines
+
+        if shorten:
+            add_output(f"<{cls_str}: ")
+            add_output(f"{title_str} ", section="title")
+
+            if data_str != "<lazy>":
+                # Flatten to a single line, reducing repeated spaces.
+                def flatten_array_str(array_str):
+                    array_str = array_str.replace("\n", " ")
+                    array_str = array_str.replace("\t", " ")
+                    while "  " in array_str:
+                        array_str = array_str.replace("  ", " ")
+                    return array_str
+
+                data_str = flatten_array_str(data_str)
+                # Adjust maximum-width to allow for the title width in the
+                # repr form.
+                current_line_len = len(output_lines[-1])
+                using_array_width = given_array_width - current_line_len
+                # Work out whether to include a summary of the data values
+                if len(data_str) > using_array_width:
+                    # Make one more attempt, printing just the *first* point,
+                    # as this is useful for dates.
+                    data_str = array_summary(
+                        self._values[:1],
+                        n_max=max_values,
+                        n_edge=edgeitems,
+                        linewidth=using_array_width,
+                        precision=precision,
+                    )
+                    data_str = flatten_array_str(data_str)
+                    data_str = data_str[:-1] + ", ...]"
+                    if len(data_str) > using_array_width:
+                        # Data summary is still too long : replace with array
+                        # "placeholder" representation.
+                        data_str = "[...]"
+
+            if self.has_bounds():
+                data_str += "+bounds"
+
+            if self.shape != (1,):
+                # Anything non-scalar : show shape as well.
+                data_str += f" shape{shape_str}"
+
+            # single-line output in 'shorten' mode
+            add_output(f"{data_str}>", section="data")
+
+        else:
+            # Long (multi-line) output format.
+            add_output(f"{cls_str} : ")
+            add_output(f"{title_str}", section="title")
+
+            def reindent_data_string(text, n_indent):
+                lines = [line for line in text.split("\n")]
+                indent = " " * (n_indent - 1)  # allow 1 for the initial '['
+                # Indent all but the *first* line.
+                line_1, rest_lines = lines[0], lines[1:]
+                rest_lines = ["\n" + indent + line for line in rest_lines]
+                result = line_1 + "".join(rest_lines)
+                return result
+
+            data_array_str = reindent_data_string(data_str, 2 * n_indent)
+
+            # NOTE: actual section name is variable here : data/points/indices
+            data_text = f"{self._values_array_name}: "
+            if "\n" in data_array_str:
+                # Put initial '[' here, and the rest on subsequent lines
+                data_text += "[" + newline_indent + indent + data_array_str[1:]
+            else:
+                # All on one line
+                data_text += data_array_str
+
+            # N.B. indent section and record section start after that
+            add_output(newline_indent)
+            add_output(data_text, section="data")
+
+            if self.has_bounds():
+                # Add a bounds section : basically just like the 'data'.
+                if self._bounds_dm.has_lazy_data():
+                    bounds_array_str = "<lazy>"
+                elif max_values == 0:
+                    bounds_array_str = "[...]"
+                else:
+                    bounds_array_str = array_summary(
+                        self._bounds_dm.data,
+                        n_max=max_values,
+                        n_edge=edgeitems,
+                        linewidth=using_array_width,
+                        precision=precision,
+                    )
+                    bounds_array_str = reindent_data_string(
+                        bounds_array_str, 2 * n_indent
+                    )
+
+                bounds_text = "bounds: "
+                if "\n" in bounds_array_str:
+                    # Put initial '[' here, and the rest on subsequent lines
+                    bounds_text += "[" + newline_indent + indent + bounds_array_str[1:]
+                else:
+                    # All on one line
+                    bounds_text += bounds_array_str
+
+                # N.B. indent section and record section start after that
+                add_output(newline_indent)
+                add_output(bounds_text, section="bounds")
+
+            if self.has_bounds():
+                shape_str += f" bounds{self._bounds_dm.shape}"
+
+            # Add shape section (always)
+            add_output(newline_indent)
+            add_output(f"shape: {shape_str}", section="shape")
+
+            # Add dtype section (always)
+            add_output(newline_indent)
+            add_output(f"dtype: {self.dtype}", section="dtype")
+
+            for name in self._metadata_manager._fields:
+                if name == "units":
+                    # This was already included in the header line
+                    continue
+                val = getattr(self, name, None)
+                if isinstance(val, Container):
+                    # Don't print empty containers, like attributes={}
+                    show = bool(val)
+                else:
+                    # Don't print properties when not present, or set to None,
+                    # or False.
+                    # This works OK as long as we are happy to treat all
+                    # boolean properties as 'off' when False : Which happens to
+                    # work for all those defined so far.
+                    show = val is not None and val is not False
+                if show:
+                    if name == "attributes":
+                        # Use a multi-line form for this.
+                        add_output(newline_indent)
+                        add_output("attributes:", section="attributes")
+                        max_attname_len = max(len(attr) for attr in val.keys())
+                        for attrname, attrval in val.items():
+                            attrname = attrname.ljust(max_attname_len)
+                            if isinstance(attrval, str):
+                                # quote strings
+                                attrval = repr(attrval)
+                                # and abbreviate really long ones
+                                attrval = iris.util.clip_string(attrval)
+                            attr_string = f"{attrname} {attrval}"
+                            add_output(newline_indent + indent + attr_string)
+                    else:
+                        # add a one-line section for this property
+                        # (aka metadata field)
+                        add_output(newline_indent)
+                        add_output(f"{name}: {val!r}", section=name)
+
+        return "\n".join(output_lines)
+
+    def __str__(self):
+        return self.summary()
+
+    def __repr__(self):
+        return self.summary(shorten=True)
+
+    def __eq__(self, other):
+        if other is self:
+            return True
+
+        # Note: this method includes bounds handling code, but it only runs
+        # within Coord type instances, as only these allow bounds to be set.
+
+        eq = NotImplemented
+        # If the other object has a means of getting its definition, then do
+        # the comparison, otherwise return a NotImplemented to let Python try
+        # to resolve the operator elsewhere.
+        if hasattr(other, "metadata"):
+            # metadata comparison
+            eq = self.metadata == other.metadata
+            # data values comparison
+            if eq and eq is not NotImplemented:
+                eq = iris.util.array_equal(
+                    self._core_values(), other._core_values(), withnans=True
+                )
+
+            # Also consider bounds, if we have them.
+            # (N.B. though only Coords can ever actually *have* bounds).
+ if eq and eq is not NotImplemented: + if self.has_bounds() and other.has_bounds(): + eq = iris.util.array_equal( + self.core_bounds(), other.core_bounds(), withnans=True + ) + else: + eq = not self.has_bounds() and not other.has_bounds() + + return eq + + def __ne__(self, other): + result = self.__eq__(other) + if result is not NotImplemented: + result = not result + return result + + # Must supply __hash__ as Python 3 does not enable it if __eq__ is defined. + # NOTE: Violates "objects which compare equal must have the same hash". + # We ought to remove this, as equality of two dimensional metadata can + # *change*, so they really should not be hashable. + # However, current code needs it, e.g. so we can put them in sets. + # Fixing it will require changing those uses. See #962 and #1772. + def __hash__(self): + return hash(id(self)) + + def __binary_operator__(self, other, mode_constant): + """Perform common code which is called by add, sub, mul and div. + + Mode constant is one of ADD, SUB, MUL, DIV, RDIV + + .. note:: + + The unit is *not* changed when doing scalar operations on a + metadata object. This means that a metadata object which represents + "10 meters" when multiplied by a scalar i.e. "1000" would result in + a metadata object of "10000 meters". An alternative approach could + be taken to multiply the *unit* by 1000 and the resultant metadata + object would represent "10 kilometers". + + """ + # Note: this method includes bounds handling code, but it only runs + # within Coord type instances, as only these allow bounds to be set. + + if isinstance(other, _DimensionalMetadata): + emsg = ( + f"{self.__class__.__name__} " + f"{self._MODE_SYMBOL[mode_constant]} " + f"{other.__class__.__name__}" + ) + raise iris.exceptions.NotYetImplementedError(emsg) + + if isinstance(other, (int, float, np.number)): + + def op(values): + if mode_constant == self._MODE_ADD: + new_values = values + other + elif mode_constant == self._MODE_SUB: + new_values = values - other + elif mode_constant == self._MODE_MUL: + new_values = values * other + elif mode_constant == self._MODE_DIV: + new_values = values / other + elif mode_constant == self._MODE_RDIV: + new_values = other / values + return new_values + + new_values = op(self._values_dm.core_data()) + result = self.copy(new_values) + + if self.has_bounds(): + result.bounds = op(self._bounds_dm.core_data()) + else: + # must return NotImplemented to ensure invocation of any + # associated reflected operator on the "other" operand + # see https://docs.python.org/3/reference/datamodel.html#emulating-numeric-types + result = NotImplemented + + return result + + def __add__(self, other): + return self.__binary_operator__(other, self._MODE_ADD) + + def __sub__(self, other): + return self.__binary_operator__(other, self._MODE_SUB) + + def __mul__(self, other): + return self.__binary_operator__(other, self._MODE_MUL) + + def __div__(self, other): + return self.__binary_operator__(other, self._MODE_DIV) + + def __truediv__(self, other): + return self.__binary_operator__(other, self._MODE_DIV) + + __radd__ = __add__ + + def __rsub__(self, other): + return (-self) + other + + def __rdiv__(self, other): + return self.__binary_operator__(other, self._MODE_RDIV) + + def __rtruediv__(self, other): + return self.__binary_operator__(other, self._MODE_RDIV) + + __rmul__ = __mul__ + + def __neg__(self): + values = -self._core_values() + copy_args = {} + if self.has_bounds(): + copy_args["bounds"] = -self.core_bounds() + return self.copy(values, **copy_args) + + def 
convert_units(self, unit): + """Change the units, converting the values of the metadata.""" + # If the coord has units convert the values in points (and bounds if + # present). + # Note: this method includes bounds handling code, but it only runs + # within Coord type instances, as only these allow bounds to be set. + if self.units.is_unknown(): + raise iris.exceptions.UnitConversionError( + "Cannot convert from unknown units. " + 'The "units" attribute may be set directly.' + ) + + # Set up a delayed conversion for use if either values or bounds (if + # present) are lazy. + # Make fixed copies of old + new units for a delayed conversion. + old_unit = self.units + new_unit = unit + + # Define a delayed conversion operation (i.e. a callback). + def pointwise_convert(values): + return old_unit.convert(values, new_unit) + + if self._has_lazy_values(): + new_values = _lazy.lazy_elementwise(self._lazy_values(), pointwise_convert) + else: + new_values = self.units.convert(self._values, unit) + self._values = new_values + if self.has_bounds(): + if self.has_lazy_bounds(): + new_bounds = _lazy.lazy_elementwise( + self.lazy_bounds(), pointwise_convert + ) + else: + new_bounds = self.units.convert(self.bounds, unit) + self.bounds = new_bounds + self.units = unit + + def is_compatible(self, other, ignore=None): + """Return whether the current dimensional metadata object is compatible with another.""" + compatible = self.name() == other.name() and self.units == other.units + + if compatible: + common_keys = set(self.attributes).intersection(other.attributes) + if ignore is not None: + if isinstance(ignore, str): + ignore = (ignore,) + common_keys = common_keys.difference(ignore) + for key in common_keys: + if np.any(self.attributes[key] != other.attributes[key]): + compatible = False + break + + return compatible + + @property + def dtype(self): + """The NumPy dtype of the current dimensional metadata object, as specified by its values.""" + return self._values_dm.dtype + + @property + def ndim(self): + """Return the number of dimensions of the current dimensional metadata object.""" + return self._values_dm.ndim + + def has_bounds(self): + """Indicate whether the current dimensional metadata object has a bounds array.""" + # Allows for code to handle unbounded dimensional metadata agnostic of + # whether the metadata is a coordinate or not. + return False + + @property + def shape(self): + """The fundamental shape of the metadata, expressed as a tuple.""" + return self._values_dm.shape + + def xml_element(self, doc): + """Create XML element. + + Create the :class:`xml.dom.minidom.Element` that describes this + :class:`_DimensionalMetadata`. + + Parameters + ---------- + doc : + The parent :class:`xml.dom.minidom.Document`. + + Returns + ------- + :class:`xml.dom.minidom.Element` + :class:`xml.dom.minidom.Element` that will describe this + :class:`_DimensionalMetadata`. - def __new__(cls, name_or_coord, minimum, maximum, - min_inclusive=True, max_inclusive=True): """ - Create a CoordExtent for the specified coordinate and range of - values. + # deferred import to avoid possible circularity + from iris.mesh import Connectivity - Args: + # Create the XML element as the camelCaseEquivalent of the + # class name. + element_name = type(self).__name__ + element_name = element_name[0].lower() + element_name[1:] + element = doc.createElement(element_name) - * name_or_coord - Either a coordinate name or a coordinate, as defined in - :meth:`iris.cube.Cube.coords()`. 
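# --- Illustrative usage (editor's note, not part of the patch) ---
# A minimal sketch of `convert_units` as described above, assuming an
# installed iris; the coordinate name and values are invented. Lazy values
# would instead be converted via the delayed `pointwise_convert` callback
# shown in the patch.
import numpy as np

from iris.coords import AuxCoord

height = AuxCoord(np.array([1.0, 2.5]), long_name="height", units="km")
height.convert_units("m")
print(height.points)  # -> [1000. 2500.]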
+ element.setAttribute("id", self._xml_id()) - * minimum - The minimum value of the range to select. + if self.standard_name: + element.setAttribute("standard_name", str(self.standard_name)) + if self.long_name: + element.setAttribute("long_name", str(self.long_name)) + if self.var_name: + element.setAttribute("var_name", str(self.var_name)) + element.setAttribute("units", repr(self.units)) + if isinstance(self, Coord): + if self.climatological: + element.setAttribute("climatological", str(self.climatological)) + if self.attributes: + attributes_element = doc.createElement("attributes") + for name in sorted(self.attributes.keys()): + attribute_element = doc.createElement("attribute") + attribute_element.setAttribute("name", name) + attribute_element.setAttribute("value", str(self.attributes[name])) + attributes_element.appendChild(attribute_element) + element.appendChild(attributes_element) - * maximum - The maximum value of the range to select. + if isinstance(self, Coord): + if self.coord_system: + element.appendChild(self.coord_system.xml_element(doc)) + + # Add the values + element.setAttribute("value_type", str(self._value_type_name())) + element.setAttribute("shape", str(self.shape)) + + # The values are referred to "points" of a coordinate and "data" + # otherwise. + if isinstance(self, Coord): + values_term = "points" + elif isinstance(self, Connectivity): + values_term = "indices" + else: + values_term = "data" + element.setAttribute(values_term, self._xml_array_repr(self._values)) + + return element + + def _xml_id_extra(self, unique_value): + return unique_value + + def _xml_id(self): + # Returns a consistent, unique string identifier for this coordinate. + unique_value = b"" + if self.standard_name: + unique_value += self.standard_name.encode("utf-8") + unique_value += b"\0" + if self.long_name: + unique_value += self.long_name.encode("utf-8") + unique_value += b"\0" + unique_value += str(self.units).encode("utf-8") + b"\0" + for k, v in sorted(self.attributes.items()): + unique_value += (str(k) + ":" + str(v)).encode("utf-8") + b"\0" + # Extra modifications to unique_value that are specialised in child + # classes + unique_value = self._xml_id_extra(unique_value) + # Mask to ensure consistency across Python versions & platforms. + crc = zlib.crc32(unique_value) & 0xFFFFFFFF + return "%08x" % (crc,) + + @staticmethod + def _xml_array_repr(data): + if hasattr(data, "to_xml_attr"): + result = data._values.to_xml_attr() + else: + result = iris.util.format_array(data) + return result + + def _value_type_name(self): + """Provide a simple name for the data type of the dimensional metadata values.""" + dtype = self._core_values().dtype + kind = dtype.kind + if kind in "SU": + # Establish the basic type name for 'string' type data. + if kind == "S": + value_type_name = "bytes" + else: + value_type_name = "string" + else: + value_type_name = dtype.name + + return value_type_name + + +class AncillaryVariable(_DimensionalMetadata): + def __init__( + self, + data, + standard_name=None, + long_name=None, + var_name=None, + units=None, + attributes=None, + ): + """Construct a single ancillary variable. + + Parameters + ---------- + data : + The values of the ancillary variable. + standard_name : optional + CF standard name of the ancillary variable. + long_name : optional + Descriptive name of the ancillary variable. + var_name : optional + The netCDF variable name for the ancillary variable. + units : optional + The :class:`~cf_units.Unit` of the ancillary variable's values. 
+ Can be a string, which will be converted to a Unit object. + attributes : optional + A dictionary containing other cf and user-defined attributes. + + """ + # Configure the metadata manager. + if not hasattr(self, "_metadata_manager"): + self._metadata_manager = metadata_manager_factory(AncillaryVariableMetadata) + + super().__init__( + values=data, + standard_name=standard_name, + long_name=long_name, + var_name=var_name, + units=units, + attributes=attributes, + ) + + @property + def data(self): + return self._values + + @data.setter + def data(self, data): + self._values = data + + def lazy_data(self): + """Return a lazy array representing the ancillary variable's data. + + Accessing this method will never cause the data values to be loaded. + Similarly, calling methods on, or indexing, the returned Array + will not cause the ancillary variable to have loaded data. + + If the data have already been loaded for the ancillary variable, the + returned Array will be a new lazy array wrapper. + + Returns + ------- + A lazy array, representing the ancillary variable data array. + + """ + return super()._lazy_values() + + def core_data(self): + """Return data array at the core of this ancillary variable. + + The data array at the core of this ancillary variable, which may be a + NumPy array or a dask array. + + """ + return super()._core_values() + + def has_lazy_data(self): + """Indicate whether the ancillary variable's data array is a lazy dask array or not.""" + return super()._has_lazy_values() + + def cube_dims(self, cube): + """Return the cube dimensions of this AncillaryVariable. + + Equivalent to "cube.ancillary_variable_dims(self)". + + """ + return cube.ancillary_variable_dims(self) + + +class CellMeasure(AncillaryVariable): + """A CF Cell Measure, providing area or volume properties of a cell. + + A CF Cell Measure, providing area or volume properties of a cell + where these cannot be inferred from the Coordinates and + Coordinate Reference System. + + """ - Kwargs: + def __init__( + self, + data, + standard_name=None, + long_name=None, + var_name=None, + units=None, + attributes=None, + measure=None, + ): + """Construct a single cell measure. + + Parameters + ---------- + data : + The values of the measure for each cell. + Either a 'real' array (:class:`numpy.ndarray`) or a 'lazy' array + (:class:`dask.array.Array`). + standard_name : optional + CF standard name of the coordinate. + long_name : optional + Descriptive name of the coordinate. + var_name : optional + The netCDF variable name for the coordinate. + units : optional + The :class:`~cf_units.Unit` of the coordinate's values. + Can be a string, which will be converted to a Unit object. + attributes : optional + A dictionary containing other CF and user-defined attributes. + measure : optional + A string describing the type of measure. Supported values are + 'area' and 'volume'. The default is 'area'. - * min_inclusive + """ + # Configure the metadata manager. + self._metadata_manager = metadata_manager_factory(CellMeasureMetadata) + + super().__init__( + data=data, + standard_name=standard_name, + long_name=long_name, + var_name=var_name, + units=units, + attributes=attributes, + ) + + if measure is None: + measure = "area" + + #: String naming the measure type. 
+ self.measure = measure + + @property + def measure(self): + return self._metadata_manager.measure + + @measure.setter + def measure(self, measure): + if measure not in ["area", "volume"]: + emsg = f"measure must be 'area' or 'volume', got {measure!r}" + raise ValueError(emsg) + self._metadata_manager.measure = measure + + def cube_dims(self, cube): + """Return the cube dimensions of this CellMeasure. + + Equivalent to "cube.cell_measure_dims(self)". + + """ + return cube.cell_measure_dims(self) + + def xml_element(self, doc): + """Create the :class:`xml.dom.minidom.Element` that describes this :class:`CellMeasure`. + + Parameters + ---------- + doc : + The parent :class:`xml.dom.minidom.Document`. + + Returns + ------- + :class:`xml.dom.minidom.Element` + The :class:`xml.dom.minidom.Element` that describes this + :class:`CellMeasure`. + + """ + # Create the XML element as the camelCaseEquivalent of the + # class name + element = super().xml_element(doc=doc) + + # Add the 'measure' property + element.setAttribute("measure", self.measure) + + return element + + +class CoordExtent( + namedtuple( + "_CoordExtent", + [ + "name_or_coord", + "minimum", + "maximum", + "min_inclusive", + "max_inclusive", + ], + ) +): + """Defines a range of values for a coordinate.""" + + def __new__( + cls, + name_or_coord, + minimum, + maximum, + min_inclusive=True, + max_inclusive=True, + ): + """Create a CoordExtent for the specified coordinate and range of values. + + Parameters + ---------- + name_or_coord : + Either a coordinate name or a coordinate, as defined in + :meth:`iris.cube.Cube.coords()`. + minimum : + The minimum value of the range to select. + maximum : + The maximum value of the range to select. + min_inclusive : bool, default=True If True, coordinate values equal to `minimum` will be included in the selection. Default is True. - - * max_inclusive + max_inclusive : bool, default=True If True, coordinate values equal to `maximum` will be included in the selection. Default is True. """ - return super(CoordExtent, cls).__new__(cls, name_or_coord, minimum, - maximum, min_inclusive, - max_inclusive) + return super().__new__( + cls, name_or_coord, minimum, maximum, min_inclusive, max_inclusive + ) __slots__ = () @@ -143,14 +1151,8 @@ def __new__(cls, name_or_coord, minimum, maximum, BOUND_POSITION_END = 1 -# Private named tuple class for coordinate groups. -_GroupbyItem = namedtuple('GroupbyItem', - 'groupby_point, groupby_slice') - - def _get_2d_coord_bound_grid(bounds): - """ - Creates a grid using the bounds of a 2D coordinate with 4 sided cells. + """Create a grid using the bounds of a 2D coordinate with 4 sided cells. Assumes that the four vertices of the cells are in an anti-clockwise order (bottom-left, bottom-right, top-right, top-left). @@ -164,19 +1166,23 @@ def _get_2d_coord_bound_grid(bounds): # 0-0-0-0-1 # 3-3-3-3-2 - Args: - * bounds: (array) - Coordinate bounds array of shape (Y, X, 4) + Parameters + ---------- + bounds : array + Coordinate bounds array of shape (Y, X, 4). - Returns: - * grid: (array) - Grid of shape (Y+1, X+1) + Returns + ------- + array + Grid of shape (Y+1, X+1). """ # Check bds has the shape (ny, nx, 4) if not (bounds.ndim == 3 and bounds.shape[-1] == 4): - raise ValueError('Bounds for 2D coordinates must be 3-dimensional and ' - 'have 4 bounds per point.') + raise ValueError( + "Bounds for 2D coordinates must be 3-dimensional and " + "have 4 bounds per point." 
+ ) bounds_shape = bounds.shape result = np.zeros((bounds_shape[0] + 1, bounds_shape[1] + 1)) @@ -189,8 +1195,9 @@ def _get_2d_coord_bound_grid(bounds): return result -class Cell(namedtuple('Cell', ['point', 'bound'])): - """ +class Cell(namedtuple("Cell", ["point", "bound"])): + """A coordinate cell containing a single point, or point and bounds. + An immutable representation of a single cell of a coordinate, including the sample point and/or boundary position. @@ -227,12 +1234,9 @@ class Cell(namedtuple('Cell', ['point', 'bound'])): __array_priority__ = 100 def __new__(cls, point=None, bound=None): - """ - Construct a Cell from point or point-and-bound information. - - """ + """Construct a Cell from point or point-and-bound information.""" if point is None: - raise ValueError('Point must be defined.') + raise ValueError("Point must be defined.") if bound is not None: bound = tuple(bound) @@ -242,11 +1246,12 @@ def __new__(cls, point=None, bound=None): if isinstance(point, (tuple, list)): if len(point) != 1: - raise ValueError('Point may only be a list or tuple if it has ' - 'length 1.') + raise ValueError( + "Point may only be a list or tuple if it has length 1." + ) point = point[0] - return super(Cell, cls).__new__(cls, point, bound) + return super().__new__(cls, point, bound) def __mod__(self, mod): point = self.point @@ -267,24 +1272,31 @@ def __add__(self, mod): return Cell(point, bound) def __hash__(self): - return super(Cell, self).__hash__() + # See __eq__ for the definition of when two cells are equal. + if self.bound is None: + return hash(self.point) + bound = self.bound + rbound = bound[::-1] + if rbound < bound: + bound = rbound + return hash((self.point, bound)) def __eq__(self, other): - """ - Compares Cell equality depending on the type of the object to be - compared. - - """ - if isinstance(other, (int, float, np.number)) or \ - hasattr(other, 'timetuple'): + """Compare Cell equality depending on the type of the object to be compared.""" + if isinstance(other, (int, float, np.number)) or hasattr(other, "timetuple"): if self.bound is not None: return self.contains_point(other) else: return self.point == other elif isinstance(other, Cell): - return (self.point == other.point) and (self.bound == other.bound) - elif (isinstance(other, six.string_types) and self.bound is None and - isinstance(self.point, six.string_types)): + return (self.point == other.point) and ( + self.bound == other.bound or self.bound == other.bound[::-1] + ) + elif ( + isinstance(other, str) + and self.bound is None + and isinstance(self.point, str) + ): return self.point == other else: return NotImplemented @@ -297,30 +1309,34 @@ def __ne__(self, other): return result def __common_cmp__(self, other, operator_method): - """ + """Common equality comparison. + Common method called by the rich comparison operators. The method of checking equality depends on the type of the object to be compared. Cell vs Cell comparison is used to define a strict order. Non-Cell vs Cell comparison is used to define Constraint matching. 
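# --- Editor's illustrative sketch (not part of the patch) --------------------
# The Cell equality and ordering rules described above, shown against the
# public iris.coords.Cell API; the values are arbitrary.
from iris.coords import Cell

cell = Cell(point=5.0, bound=(4.0, 6.0))
assert cell == 5.5                         # bounded cell matches any value within its bounds
assert cell == Cell(5.0, (6.0, 4.0))       # reversed bounds compare equal...
assert hash(cell) == hash(Cell(5.0, (6.0, 4.0)))  # ...and hash equal, per __hash__
assert cell > 3.0                          # vs a number: compares the relevant bound extreme
# ------------------------------------------------------------------------------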
- """ - if not (isinstance(other, (int, float, np.number, Cell)) or - hasattr(other, 'timetuple')): - raise TypeError("Unexpected type of other " - "{}.".format(type(other))) - if operator_method not in (operator.gt, operator.lt, - operator.ge, operator.le): + """ # noqa: D401 + if (isinstance(other, list) and len(other) == 1) or ( + isinstance(other, np.ndarray) and other.shape == (1,) + ): + other = other[0] + if isinstance(other, np.ndarray) and other.shape == (): + other = float(other) + if not ( + isinstance(other, (int, float, np.number, Cell)) + or hasattr(other, "timetuple") + ): + raise TypeError("Unexpected type of other {}.".format(type(other))) + if operator_method not in ( + operator.gt, + operator.lt, + operator.ge, + operator.le, + ): raise ValueError("Unexpected operator_method") - # Prevent silent errors resulting from missing cftime - # behaviour. - if (isinstance(other, cftime.datetime) or - (isinstance(self.point, cftime.datetime) and - not isinstance(other, iris.time.PartialDateTime))): - raise TypeError('Cannot determine the order of ' - 'cftime.datetime objects') - if isinstance(other, Cell): # Cell vs Cell comparison for providing a strict sort order if self.bound is None: @@ -359,8 +1375,7 @@ def __common_cmp__(self, other, operator_method): if self.bound[1] == other.bound[1]: result = operator_method(self.point, other.point) else: - result = operator_method(self.bound[1], - other.bound[1]) + result = operator_method(self.bound[1], other.bound[1]) else: result = operator_method(self.bound[0], other.bound[0]) else: @@ -371,10 +1386,6 @@ def __common_cmp__(self, other, operator_method): # - Simple matching me = self.point else: - if hasattr(other, 'timetuple'): - raise TypeError('Cannot determine whether a point lies ' - 'within a bounded region for ' - 'datetime-like objects.') # Point-and-bound vs number # - Match if "within" the Cell if operator_method in [operator.gt, operator.le]: @@ -382,17 +1393,7 @@ def __common_cmp__(self, other, operator_method): else: me = max(self.bound) - # Work around to handle cftime.datetime comparison, which - # doesn't return NotImplemented on failure in some versions of the - # library - try: - result = operator_method(me, other) - except TypeError: - rop = {operator.lt: operator.gt, - operator.gt: operator.lt, - operator.le: operator.ge, - operator.ge: operator.le}[operator_method] - result = rop(other, me) + result = operator_method(me, other) return result @@ -403,91 +1404,80 @@ def __le__(self, other): return self.__common_cmp__(other, operator.le) def __gt__(self, other): - return self.__common_cmp__(other, operator.gt) - - def __lt__(self, other): - return self.__common_cmp__(other, operator.lt) - - def __str__(self): - if self.bound is not None: - return repr(self) - else: - return str(self.point) - - def contains_point(self, point): - """ - For a bounded cell, returns whether the given point lies within the - bounds. - - .. note:: The test carried out is equivalent to min(bound) - <= point <= max(bound). - - """ - if self.bound is None: - raise ValueError('Point cannot exist inside an unbounded cell.') - if hasattr(point, 'timetuple') or np.any([hasattr(val, 'timetuple') for - val in self.bound]): - raise TypeError('Cannot determine whether a point lies within ' - 'a bounded region for datetime-like objects.') - - return np.min(self.bound) <= point <= np.max(self.bound) - - -class Coord(six.with_metaclass(ABCMeta, CFVariableMixin)): - """ - Abstract superclass for coordinates. 
- - """ - - _MODE_ADD = 1 - _MODE_SUB = 2 - _MODE_MUL = 3 - _MODE_DIV = 4 - _MODE_RDIV = 5 - _MODE_SYMBOL = {_MODE_ADD: '+', _MODE_SUB: '-', - _MODE_MUL: '*', _MODE_DIV: '/', - _MODE_RDIV: '/'} + return self.__common_cmp__(other, operator.gt) - def __init__(self, points, standard_name=None, long_name=None, - var_name=None, units='1', bounds=None, - attributes=None, coord_system=None, - climatological=False): + def __lt__(self, other): + return self.__common_cmp__(other, operator.lt) - """ - Constructs a single coordinate. + def __str__(self): + if self.bound is not None: + return repr(self) + else: + return str(self.point) + + def contains_point(self, point): + """For a bounded cell, returns whether the given point lies within the bounds. - Args: + .. note:: The test carried out is equivalent to min(bound) + <= point <= max(bound). - * points: - The values (or value in the case of a scalar coordinate) of the - coordinate for each cell. + """ + if self.bound is None: + raise ValueError("Point cannot exist inside an unbounded cell.") + return np.min(self.bound) <= point <= np.max(self.bound) - Kwargs: - * standard_name: +class Coord(_DimensionalMetadata): + """Abstract base class for coordinates.""" + + _values_array_name = "points" + + @abstractmethod + def __init__( + self, + points, + standard_name=None, + long_name=None, + var_name=None, + units=None, + bounds=None, + attributes=None, + coord_system=None, + climatological=False, + ): + """Coordinate abstract base class. + + As of ``v3.0.0`` you **cannot** create an instance of :class:`Coord`. + + Parameters + ---------- + points : + The values (or value in the case of a scalar coordinate) for each + cell of the coordinate. + standard_name : optional CF standard name of the coordinate. - * long_name: + long_name : optional Descriptive name of the coordinate. - * var_name: + var_name : optional The netCDF variable name for the coordinate. - * units + units : optional The :class:`~cf_units.Unit` of the coordinate's values. Can be a string, which will be converted to a Unit object. - * bounds + bounds : optional An array of values describing the bounds of each cell. Given n bounds for each cell, the shape of the bounds array should be - points.shape + (n,). For example, a 1d coordinate with 100 points + points.shape + (n,). For example, a 1D coordinate with 100 points and two bounds per cell would have a bounds array of shape (100, 2) Note if the data is a climatology, `climatological` should be set. - * attributes - A dictionary containing other cf and user-defined attributes. - * coord_system + attributes : optional + A dictionary containing other CF and user-defined attributes. + coord_system : optional A :class:`~iris.coord_systems.CoordSystem` representing the coordinate system of the coordinate, - e.g. a :class:`~iris.coord_systems.GeogCS` for a longitude Coord. - * climatological (bool): + e.g., a :class:`~iris.coord_systems.GeogCS` for a longitude coordinate. + climatological : bool, default=False When True: the coordinate is a NetCDF climatological time axis. When True: saving in NetCDF will give the coordinate variable a 'climatology' attribute and will create a boundary variable called @@ -496,167 +1486,108 @@ def __init__(self, points, standard_name=None, long_name=None, Will set to True when a climatological time axis is loaded from NetCDF. Always False if no bounds exist. - """ - #: CF standard name of the quantity that the coordinate represents. - self.standard_name = standard_name - - #: Descriptive name of the coordinate. 
- self.long_name = long_name - - #: The netCDF variable name for the coordinate. - self.var_name = var_name - #: Unit of the quantity that the coordinate represents. - self.units = units + """ + # Configure the metadata manager. + if not hasattr(self, "_metadata_manager"): + self._metadata_manager = metadata_manager_factory(CoordMetadata) - #: Other attributes, including user specified attributes that - #: have no meaning to Iris. - self.attributes = attributes + super().__init__( + values=points, + standard_name=standard_name, + long_name=long_name, + var_name=var_name, + units=units, + attributes=attributes, + ) #: Relevant coordinate system (if any). self.coord_system = coord_system - # Set up DataManager attributes and points and bounds values. - self._points_dm = None + # Set up bounds DataManager attributes and the bounds values. self._bounds_dm = None - self.points = points self.bounds = bounds self.climatological = climatological - def __getitem__(self, keys): - """ - Returns a new Coord whose values are obtained by conventional array - indexing. - - .. note:: - - Indexing of a circular coordinate results in a non-circular - coordinate if the overall shape of the coordinate changes after - indexing. - - """ - # Fetch the points and bounds. - points = self._points_dm.core_data() - if self.has_bounds(): - bounds = self._bounds_dm.core_data() - else: - bounds = None - - # Index both points and bounds with the keys. - _, points = iris.util._slice_data_with_keys( - points, keys) - if bounds is not None: - _, bounds = iris.util._slice_data_with_keys( - bounds, keys) - - # Copy data after indexing, to avoid making coords that are - # views on other coords. This will not realise lazy data. - points = points.copy() - if bounds is not None: - bounds = bounds.copy() - - # The new coordinate is a copy of the old one with replaced content. - new_coord = self.copy(points=points, bounds=bounds) - return new_coord + self._ignore_axis = DEFAULT_IGNORE_AXIS def copy(self, points=None, bounds=None): - """ - Returns a copy of this coordinate. - - Kwargs: - - * points: A points array for the new coordinate. - This may be a different shape to the points of the coordinate - being copied. - - * bounds: A bounds array for the new coordinate. - Given n bounds for each cell, the shape of the bounds array - should be points.shape + (n,). For example, a 1d coordinate - with 100 points and two bounds per cell would have a bounds - array of shape (100, 2). - + """Return a copy of this coordinate. + + Parameters + ---------- + points : optional + A points array for the new coordinate. + This may be a different shape to the points of the coordinate + being copied. + bounds : optional + A bounds array for the new coordinate. + Given n bounds for each cell, the shape of the bounds array + should be points.shape + (n,). For example, a 1d coordinate + with 100 points and two bounds per cell would have a bounds + array of shape (100, 2). + + Notes + ----- .. note:: If the points argument is specified and bounds are not, the resulting coordinate will have no bounds. 
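# --- Editor's illustrative sketch (not part of the patch) --------------------
# Coord.copy() with replacement points, as documented above: supplying new
# points discards any bounds copied from the source coordinate. AuxCoord is
# used as a concrete Coord subclass; the values are arbitrary.
import numpy as np
from iris.coords import AuxCoord

src = AuxCoord(
    np.arange(4.0),
    long_name="level",
    bounds=np.column_stack([np.arange(4.0) - 0.5, np.arange(4.0) + 0.5]),
)
dup = src.copy(points=np.arange(4.0) * 10.0)
assert dup.bounds is None   # new points => old bounds are dropped
# ------------------------------------------------------------------------------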
""" if points is None and bounds is not None: - raise ValueError('If bounds are specified, points must also be ' - 'specified') + raise ValueError("If bounds are specified, points must also be specified") - new_coord = copy.deepcopy(self) + new_coord = super().copy(values=points) if points is not None: - new_coord._points_dm = None - new_coord.points = points # Regardless of whether bounds are provided as an argument, new # points will result in new bounds, discarding those copied from # self. new_coord.bounds = bounds + # The state of ignore_axis is controlled by the coordinate rather than + # the metadata manager + new_coord.ignore_axis = self.ignore_axis + return new_coord @classmethod def from_coord(cls, coord): """Create a new Coord of this type, from the given coordinate.""" - kwargs = {'points': coord.core_points(), - 'bounds': coord.core_bounds(), - 'standard_name': coord.standard_name, - 'long_name': coord.long_name, - 'var_name': coord.var_name, - 'units': coord.units, - 'attributes': coord.attributes, - 'coord_system': copy.deepcopy(coord.coord_system)} + kwargs = { + "points": coord.core_points(), + "bounds": coord.core_bounds(), + "standard_name": coord.standard_name, + "long_name": coord.long_name, + "var_name": coord.var_name, + "units": coord.units, + "attributes": coord.attributes, + "coord_system": copy.deepcopy(coord.coord_system), + "climatological": coord.climatological, + } if issubclass(cls, DimCoord): # DimCoord introduces an extra constructor keyword. - kwargs['circular'] = getattr(coord, 'circular', False) - return cls(**kwargs) + kwargs["circular"] = getattr(coord, "circular", False) - @staticmethod - def _sanitise_array(src, ndmin): - if _lazy.is_lazy_data(src): - # Lazy data : just ensure ndmin requirement. - ndims_missing = ndmin - src.ndim - if ndims_missing <= 0: - result = src - else: - extended_shape = tuple([1] * ndims_missing + list(src.shape)) - result = src.reshape(extended_shape) - else: - # Real data : a few more things to do in this case. - # Ensure the array is writeable. - # NB. Returns the *same object* if src is already writeable. - result = np.require(src, requirements='W') - # Ensure the array has enough dimensions. - # NB. Returns the *same object* if result.ndim >= ndmin - func = ma.array if ma.isMaskedArray(result) else np.array - result = func(result, ndmin=ndmin, copy=False) - # We don't need to copy the data, but we do need to have our - # own view so we can control the shape, etc. - result = result.view() - return result + new_coord = cls(**kwargs) + + # The state of ignore_axis is controlled by the coordinate rather than + # the metadata manager + new_coord.ignore_axis = coord.ignore_axis + + return new_coord @property def points(self): """The coordinate points values as a NumPy array.""" - return self._points_dm.data.view() + return self._values @points.setter def points(self, points): - # Set the points to a new array - as long as it's the same shape. - - # Ensure points has an ndmin of 1 and is either a numpy or lazy array. - # This will avoid Scalar coords with points of shape () rather - # than the desired (1,). - points = self._sanitise_array(points, 1) - - # Set or update DataManager. - if self._points_dm is None: - self._points_dm = DataManager(points) - else: - self._points_dm.data = points + self._values = points @property def bounds(self): - """ + """Coordinate bounds values. + The coordinate bounds values, as a NumPy array, or None if no bound values are defined. 
@@ -674,22 +1605,30 @@ def bounds(self, bounds): # Ensure the bounds are a compatible shape. if bounds is None: self._bounds_dm = None - self._climatological = False + self.climatological = False else: bounds = self._sanitise_array(bounds, 2) if self.shape != bounds.shape[:-1]: - raise ValueError("Bounds shape must be compatible with points " - "shape.") - if not self.has_bounds() \ - or self.core_bounds().shape != bounds.shape: + raise ValueError("Bounds shape must be compatible with points shape.") + if not self.has_bounds() or self.core_bounds().shape != bounds.shape: # Construct a new bounds DataManager. self._bounds_dm = DataManager(bounds) else: self._bounds_dm.data = bounds + @property + def coord_system(self): + """The coordinate-system of the coordinate.""" + return self._metadata_manager.coord_system + + @coord_system.setter + def coord_system(self, value): + self._metadata_manager.coord_system = value + @property def climatological(self): - """ + """Flag for representing a climatological time axis. + A boolean that controls whether the coordinate is a climatological time axis, in which case the bounds represent a climatological period rather than a normal period. @@ -697,8 +1636,13 @@ def climatological(self): Always reads as False if there are no bounds. On set, the input value is cast to a boolean, exceptions raised if units are not time units or if there are no bounds. + """ - return self._climatological if self.has_bounds() else False + if not self.has_bounds(): + self._metadata_manager.climatological = False + if not self.units.is_time_reference(): + self._metadata_manager.climatological = False + return self._metadata_manager.climatological @climatological.setter def climatological(self, value): @@ -706,19 +1650,36 @@ def climatological(self, value): value = bool(value) if value: if not self.units.is_time_reference(): - emsg = ("Cannot set climatological coordinate, does not have" - " valid time reference units, got {!r}.") + emsg = ( + "Cannot set climatological coordinate, does not have" + " valid time reference units, got {!r}." + ) raise TypeError(emsg.format(self.units)) if not self.has_bounds(): emsg = "Cannot set climatological coordinate, no bounds exist." raise ValueError(emsg) - self._climatological = value + self._metadata_manager.climatological = value - def lazy_points(self): + @property + def ignore_axis(self): + """A boolean controlling if iris.util.guess_coord_axis acts on this coordinate. + + Defaults to ``False``, and when set to ``True`` it will be skipped by + :func:`iris.util.guess_coord_axis`. """ - Return a lazy array representing the coord points. + return self._ignore_axis + + @ignore_axis.setter + def ignore_axis(self, value): + if not isinstance(value, bool): + emsg = "'ignore_axis' can only be set to 'True' or 'False'" + raise ValueError(emsg) + self._ignore_axis = value + + def lazy_points(self): + """Return a lazy array representing the coord points. Accessing this method will never cause the points values to be loaded. Similarly, calling methods on, or indexing, the returned Array @@ -727,15 +1688,15 @@ def lazy_points(self): If the data have already been loaded for the coord, the returned Array will be a new lazy array wrapper. - Returns: - A lazy array, representing the coord points array. + Returns + ------- + A lazy array, representing the coord points array. """ - return self._points_dm.lazy_data() + return super()._lazy_values() def lazy_bounds(self): - """ - Return a lazy array representing the coord bounds. 
+ """Return a lazy array representing the coord bounds. Accessing this method will never cause the bounds values to be loaded. Similarly, calling methods on, or indexing, the returned Array @@ -744,7 +1705,9 @@ def lazy_bounds(self): If the data have already been loaded for the coord, the returned Array will be a new lazy array wrapper. - Returns: + Returns + ------- + lazy array A lazy array representing the coord bounds array or `None` if the coord does not have bounds. @@ -755,22 +1718,11 @@ def lazy_bounds(self): return lazy_bounds def core_points(self): - """ - The points array at the core of this coord, which may be a NumPy array - or a dask array. - - """ - result = self._points_dm.core_data() - if not _lazy.is_lazy_data(result): - result = result.view() - return result + """Core points array at the core of this coord, which may be a NumPy array or a dask array.""" + return super()._core_values() def core_bounds(self): - """ - The points array at the core of this coord, which may be a NumPy array - or a dask array. - - """ + """Core bounds. The points array at the core of this coord, which may be a NumPy array or a dask array.""" result = None if self.has_bounds(): result = self._bounds_dm.core_data() @@ -779,15 +1731,12 @@ def core_bounds(self): return result def has_lazy_points(self): - """ - Return a boolean indicating whether the coord's points array is a - lazy dask array or not. - - """ - return self._points_dm.has_lazy_data() + """Return a boolean whether the coord's points array is a lazy dask array or not.""" + return super()._has_lazy_values() def has_lazy_bounds(self): - """ + """Whether coordinate bounds are lazy. + Return a boolean indicating whether the coord's bounds array is a lazy dask array or not. @@ -797,102 +1746,6 @@ def has_lazy_bounds(self): result = self._bounds_dm.has_lazy_data() return result - def _repr_other_metadata(self): - fmt = '' - if self.long_name: - fmt = ', long_name={self.long_name!r}' - if self.var_name: - fmt += ', var_name={self.var_name!r}' - if len(self.attributes) > 0: - fmt += ', attributes={self.attributes}' - if self.coord_system: - fmt += ', coord_system={self.coord_system}' - if self.climatological: - fmt += ', climatological={' \ - 'self.climatological}' - result = fmt.format(self=self) - return result - - def _str_dates(self, dates_as_numbers): - date_obj_array = self.units.num2date(dates_as_numbers) - kwargs = {'separator': ', ', 'prefix': ' '} - return np.core.arrayprint.array2string(date_obj_array, - formatter={'all': str}, - **kwargs) - - def __str__(self): - if self.units.is_time_reference(): - fmt = '{cls}({points}{bounds}' \ - ', standard_name={self.standard_name!r}' \ - ', calendar={self.units.calendar!r}{other_metadata})' - if self.units.is_long_time_interval(): - # A time unit with a long time interval ("months" or "years") - # cannot be converted to a date using `num2date` so gracefully - # fall back to printing points as numbers, not datetimes. 
- points = self.points - else: - points = self._str_dates(self.points) - bounds = '' - if self.has_bounds(): - if self.units.is_long_time_interval(): - bounds_vals = self.bounds - else: - bounds_vals = self._str_dates(self.bounds) - bounds = ', bounds={vals}'.format(vals=bounds_vals) - result = fmt.format(self=self, cls=type(self).__name__, - points=points, bounds=bounds, - other_metadata=self._repr_other_metadata()) - else: - result = repr(self) - return result - - def __repr__(self): - fmt = '{cls}({self.points!r}{bounds}' \ - ', standard_name={self.standard_name!r}, units={self.units!r}' \ - '{other_metadata})' - bounds = '' - if self.has_bounds(): - bounds = ', bounds=' + repr(self.bounds) - result = fmt.format(self=self, cls=type(self).__name__, - bounds=bounds, - other_metadata=self._repr_other_metadata()) - return result - - def __eq__(self, other): - eq = NotImplemented - # If the other object has a means of getting its definition, and - # whether or not it has_points and has_bounds, then do the - # comparison, otherwise return a NotImplemented to let Python try to - # resolve the operator elsewhere. - if hasattr(other, '_as_defn'): - # metadata comparison - eq = self._as_defn() == other._as_defn() - # points comparison - if eq: - eq = iris.util.array_equal(self.points, other.points, - withnans=True) - # bounds comparison - if eq: - if self.has_bounds() and other.has_bounds(): - eq = iris.util.array_equal(self.bounds, other.bounds, - withnans=True) - else: - eq = self.bounds is None and other.bounds is None - - return eq - - def __ne__(self, other): - result = self.__eq__(other) - if result is not NotImplemented: - result = not result - return result - - def _as_defn(self): - defn = CoordDefn(self.standard_name, self.long_name, self.var_name, - self.units, self.attributes, self.coord_system, - self.climatological) - return defn - # Must supply __hash__ as Python 3 does not enable it if __eq__ is defined. # NOTE: Violates "objects which compare equal must have the same hash". # We ought to remove this, as equality of two coords can *change*, so they @@ -902,100 +1755,16 @@ def _as_defn(self): def __hash__(self): return hash(id(self)) - def __binary_operator__(self, other, mode_constant): - """ - Common code which is called by add, sub, mul and div - - Mode constant is one of ADD, SUB, MUL, DIV, RDIV - - .. note:: + def cube_dims(self, cube): + """Return the cube dimensions of this Coord. - The unit is *not* changed when doing scalar operations on a - coordinate. This means that a coordinate which represents - "10 meters" when multiplied by a scalar i.e. "1000" would result - in a coordinate of "10000 meters". An alternative approach could - be taken to multiply the *unit* by 1000 and the resultant - coordinate would represent "10 kilometers". + Equivalent to "cube.coord_dims(self)". 
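# --- Editor's illustrative sketch (not part of the patch) --------------------
# cube_dims(), as documented above, is the generic counterpart of
# cube.coord_dims(): it reports which data dimensions of a cube the
# coordinate spans. The cube and coordinate here are arbitrary.
import numpy as np
from iris.coords import DimCoord
from iris.cube import Cube

cube = Cube(np.zeros((3, 4)))
y = DimCoord(np.arange(3.0), long_name="y")
cube.add_dim_coord(y, 0)
assert y.cube_dims(cube) == (0,)   # same answer as cube.coord_dims(y)
# ------------------------------------------------------------------------------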
""" - if isinstance(other, Coord): - emsg = 'coord {} coord'.format(Coord._MODE_SYMBOL[mode_constant]) - raise iris.exceptions.NotYetImplementedError(emsg) - - elif isinstance(other, (int, float, np.number)): - points = self._points_dm.core_data() - - if mode_constant == Coord._MODE_ADD: - new_points = points + other - elif mode_constant == Coord._MODE_SUB: - new_points = points - other - elif mode_constant == Coord._MODE_MUL: - new_points = points * other - elif mode_constant == Coord._MODE_DIV: - new_points = points / other - elif mode_constant == Coord._MODE_RDIV: - new_points = other / points - - if self.has_bounds(): - bounds = self._bounds_dm.core_data() - - if mode_constant == Coord._MODE_ADD: - new_bounds = bounds + other - elif mode_constant == Coord._MODE_SUB: - new_bounds = bounds - other - elif mode_constant == Coord._MODE_MUL: - new_bounds = bounds * other - elif mode_constant == Coord._MODE_DIV: - new_bounds = bounds / other - elif mode_constant == Coord._MODE_RDIV: - new_bounds = other / bounds - - else: - new_bounds = None - new_coord = self.copy(new_points, new_bounds) - return new_coord - - else: - return NotImplemented - - def __add__(self, other): - return self.__binary_operator__(other, Coord._MODE_ADD) - - def __sub__(self, other): - return self.__binary_operator__(other, Coord._MODE_SUB) - - def __mul__(self, other): - return self.__binary_operator__(other, Coord._MODE_MUL) - - def __div__(self, other): - return self.__binary_operator__(other, Coord._MODE_DIV) - - def __truediv__(self, other): - return self.__binary_operator__(other, Coord._MODE_DIV) - - def __radd__(self, other): - return self + other - - def __rsub__(self, other): - return (-self) + other - - def __rdiv__(self, other): - return self.__binary_operator__(other, Coord._MODE_RDIV) - - def __rtruediv__(self, other): - return self.__binary_operator__(other, Coord._MODE_RDIV) - - def __rmul__(self, other): - return self * other - - def __neg__(self): - return self.copy(-self.core_points(), - -self.core_bounds() if self.has_bounds() else None) + return cube.coord_dims(self) def convert_units(self, unit): - """ - Change the coordinate's units, converting the values in its points - and bounds arrays. + r"""Change the coordinate's units, converting the values in its points and bounds arrays. For example, if a coordinate's :attr:`~iris.coords.Coord.units` attribute is set to radians then:: @@ -1007,40 +1776,13 @@ def convert_units(self, unit): multiply each value in :attr:`~iris.coords.Coord.points` and :attr:`~iris.coords.Coord.bounds` by 180.0/:math:`\pi`. + Full list of supported units can be found in the UDUNITS-2 documentation + https://docs.unidata.ucar.edu/udunits/current/#Database """ - # If the coord has units convert the values in points (and bounds if - # present). - if self.units.is_unknown(): - raise iris.exceptions.UnitConversionError( - 'Cannot convert from unknown units. ' - 'The "coord.units" attribute may be set directly.') - if self.has_lazy_points() or self.has_lazy_bounds(): - # Make fixed copies of old + new units for a delayed conversion. - old_unit = self.units - new_unit = unit - - # Define a delayed conversion operation (i.e. a callback). 
- def pointwise_convert(values): - return old_unit.convert(values, new_unit) - - if self.has_lazy_points(): - new_points = _lazy.lazy_elementwise(self.lazy_points(), - pointwise_convert) - else: - new_points = self.units.convert(self.points, unit) - self.points = new_points - if self.has_bounds(): - if self.has_lazy_bounds(): - new_bounds = _lazy.lazy_elementwise(self.lazy_bounds(), - pointwise_convert) - else: - new_bounds = self.units.convert(self.bounds, unit) - self.bounds = new_bounds - self.units = unit + super().convert_units(unit=unit) def cells(self): - """ - Returns an iterable of Cell instances for this Coord. + """Return an iterable of Cell instances for this Coord. For example:: @@ -1048,89 +1790,131 @@ def cells(self): ... """ - return _CellIterator(self) + if self.ndim != 1: + raise iris.exceptions.CoordinateMultiDimError(self) + + points = self.points + bounds = self.bounds + if self.units.is_time_reference(): + points = self.units.num2date(points) + if self.has_bounds(): + bounds = self.units.num2date(bounds) + + if self.has_bounds(): + for point, bound in zip(points, bounds): + yield Cell(point, bound) + else: + for point in points: + yield Cell(point) def _sanity_check_bounds(self): if self.ndim == 1: if self.nbounds != 2: - raise ValueError('Invalid operation for {!r}, with {} ' - 'bound(s). Contiguous bounds are only ' - 'defined for 1D coordinates with 2 ' - 'bounds.'.format(self.name(), self.nbounds)) + raise ValueError( + "Invalid operation for {!r}, with {} " + "bound(s). Contiguous bounds are only " + "defined for 1D coordinates with 2 " + "bounds.".format(self.name(), self.nbounds) + ) elif self.ndim == 2: if self.nbounds != 4: - raise ValueError('Invalid operation for {!r}, with {} ' - 'bound(s). Contiguous bounds are only ' - 'defined for 2D coordinates with 4 ' - 'bounds.'.format(self.name(), self.nbounds)) + raise ValueError( + "Invalid operation for {!r}, with {} " + "bound(s). Contiguous bounds are only " + "defined for 2D coordinates with 4 " + "bounds.".format(self.name(), self.nbounds) + ) else: - raise ValueError('Invalid operation for {!r}. Contiguous bounds ' - 'are not defined for coordinates with more than ' - '2 dimensions.'.format(self.name())) + raise ValueError( + "Invalid operation for {!r}. Contiguous bounds " + "are not defined for coordinates with more than " + "2 dimensions.".format(self.name()) + ) def _discontiguity_in_bounds(self, rtol=1e-5, atol=1e-8): - """ - Checks that the bounds of the coordinate are contiguous. + """Check that the bounds of the coordinate are contiguous. - Kwargs: - * rtol: (float) + rtol : float, default=1e-5 Relative tolerance that is used when checking contiguity. Defaults to 1e-5. - * atol: (float) + atol : float, default=1e-8 Absolute tolerance that is used when checking contiguity. Defaults to 1e-8. - Returns: - * contiguous: (boolean) + Returns + ------- + contiguous : bool True if there are no discontiguities. - * diffs: (array or tuple of arrays) - The diffs along the bounds of the coordinate. If self is a 2D - coord of shape (Y, X), a tuple of arrays is returned, where the - first is an array of differences along the x-axis, of the shape - (Y, X-1) and the second is an array of differences along the - y-axis, of the shape (Y-1, X). + diffs : array or tuple of arrays + A boolean array or tuple of boolean arrays which are true where + there are discontiguities between neighbouring bounds. 
If self is + a 2D coord of shape (Y, X), a pair of arrays is returned, where + the first is an array of differences along the x-axis, of the + shape (Y, X-1) and the second is an array of differences along + the y-axis, of the shape (Y-1, X). """ self._sanity_check_bounds() if self.ndim == 1: - contiguous = np.allclose(self.bounds[1:, 0], - self.bounds[:-1, 1], - rtol=rtol, atol=atol) - diffs = np.abs(self.bounds[:-1, 1] - self.bounds[1:, 0]) + contiguous = np.allclose( + self.bounds[1:, 0], self.bounds[:-1, 1], rtol=rtol, atol=atol + ) + diffs = ~np.isclose( + self.bounds[1:, 0], self.bounds[:-1, 1], rtol=rtol, atol=atol + ) elif self.ndim == 2: + def mod360_adjust(compare_axis): bounds = self.bounds.copy() - if compare_axis == 'x': - upper_bounds = bounds[:, :-1, 1] - lower_bounds = bounds[:, 1:, 0] - elif compare_axis == 'y': - upper_bounds = bounds[:-1, :, 3] - lower_bounds = bounds[1:, :, 0] - - if self.name() in ['longitude', 'grid_longitude']: + if compare_axis == "x": + # Extract the pairs of upper bounds and lower bounds which + # connect along the "x" axis. These connect along indices + # as shown by the following diagram: + # + # 3---2 + 3---2 + # | | | | + # 0---1 + 0---1 + upper_bounds = np.stack((bounds[:, :-1, 1], bounds[:, :-1, 2])) + lower_bounds = np.stack((bounds[:, 1:, 0], bounds[:, 1:, 3])) + elif compare_axis == "y": + # Extract the pairs of upper bounds and lower bounds which + # connect along the "y" axis. These connect along indices + # as shown by the following diagram: + # + # 3---2 + # | | + # 0---1 + # + + + # 3---2 + # | | + # 0---1 + upper_bounds = np.stack((bounds[:-1, :, 3], bounds[:-1, :, 2])) + lower_bounds = np.stack((bounds[1:, :, 0], bounds[1:, :, 1])) + + if self.name() in ["longitude", "grid_longitude"]: # If longitude, adjust for longitude wrapping diffs = upper_bounds - lower_bounds - index = diffs > 180 + index = np.abs(diffs) > 180 if index.any(): sign = np.sign(diffs) modification = (index.astype(int) * 360) * sign upper_bounds -= modification - diffs_between_cells = np.abs(upper_bounds - lower_bounds) - cell_size = lower_bounds - upper_bounds - diffs_along_axis = diffs_between_cells > (atol + - rtol * cell_size) + diffs_along_bounds = ~np.isclose( + upper_bounds, lower_bounds, rtol=rtol, atol=atol + ) + diffs_along_axis = np.logical_or( + diffs_along_bounds[0], diffs_along_bounds[1] + ) - points_close_enough = diffs_along_axis <= (atol + - rtol * cell_size) - contiguous_along_axis = np.all(points_close_enough) + contiguous_along_axis = ~np.any(diffs_along_axis) return diffs_along_axis, contiguous_along_axis - diffs_along_x, match_cell_x1 = mod360_adjust(compare_axis='x') - diffs_along_y, match_cell_y1 = mod360_adjust(compare_axis='y') + diffs_along_x, match_cell_x1 = mod360_adjust(compare_axis="x") + diffs_along_y, match_cell_y1 = mod360_adjust(compare_axis="y") contiguous = match_cell_x1 and match_cell_y1 diffs = (diffs_along_x, diffs_along_y) @@ -1138,7 +1922,8 @@ def mod360_adjust(compare_axis): return contiguous, diffs def is_contiguous(self, rtol=1e-05, atol=1e-08): - """ + """Whether coordinate has contiguous bounds. + Return True if, and only if, this Coord is bounded with contiguous bounds to within the specified relative and absolute tolerances. @@ -1150,15 +1935,16 @@ def is_contiguous(self, rtol=1e-05, atol=1e-08): it, and the upper left corner of each cell aligns with the lower left corner of the cell above it. 
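# --- Editor's illustrative sketch (not part of the patch) --------------------
# The 1-D contiguity test described above, in plain NumPy: neighbouring cells
# are contiguous when each cell's upper bound matches the next cell's lower
# bound, to within the rtol/atol tolerances. This mirrors the check in
# _discontiguity_in_bounds.
import numpy as np

bounds = np.array([[0.0, 1.0], [1.0, 2.0], [2.0, 3.5]])
contiguous = np.allclose(bounds[1:, 0], bounds[:-1, 1])  # True for this array
# ------------------------------------------------------------------------------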
- Args: - - * rtol: + Parameters + ---------- + rtol : float, default=1e-05 The relative tolerance parameter (default is 1e-05). - * atol: + atol : float, default=1e-08 The absolute tolerance parameter (default is 1e-08). - Returns: - Boolean. + Returns + ------- + bool """ if self.has_bounds(): @@ -1167,9 +1953,10 @@ def is_contiguous(self, rtol=1e-05, atol=1e-08): contiguous = False return contiguous - def contiguous_bounds(self): - """ - Returns the N+1 bound values for a contiguous bounded 1D coordinate + def contiguous_bounds(self): # numpydoc ignore=SS05 + """Contiguous bounds of 1D coordinate. + + Return the N+1 bound values for a contiguous bounded 1D coordinate of length N, or the (N+1, M+1) bound values for a contiguous bounded 2D coordinate of shape (N, M). @@ -1189,13 +1976,18 @@ def contiguous_bounds(self): """ if not self.has_bounds(): if self.ndim == 1: - warnings.warn('Coordinate {!r} is not bounded, guessing ' - 'contiguous bounds.'.format(self.name())) + warnings.warn( + "Coordinate {!r} is not bounded, guessing " + "contiguous bounds.".format(self.name()), + category=iris.warnings.IrisGuessBoundsWarning, + ) bounds = self._guess_bounds() elif self.ndim == 2: - raise ValueError('2D coordinate {!r} is not bounded. Guessing ' - 'bounds of 2D coords is not currently ' - 'supported.'.format(self.name())) + raise ValueError( + "2D coordinate {!r} is not bounded. Guessing " + "bounds of 2D coords is not currently " + "supported.".format(self.name()) + ) else: self._sanity_check_bounds() bounds = self.bounds @@ -1209,7 +2001,6 @@ def contiguous_bounds(self): def is_monotonic(self): """Return True if, and only if, this Coord is monotonic.""" - if self.ndim != 1: raise iris.exceptions.CoordinateMultiDimError(self) @@ -1222,63 +2013,45 @@ def is_monotonic(self): if self.has_bounds(): for b_index in range(self.nbounds): - if not iris.util.monotonic(self.bounds[..., b_index], - strict=True): + if not iris.util.monotonic(self.bounds[..., b_index], strict=True): return False return True def is_compatible(self, other, ignore=None): - """ - Return whether the coordinate is compatible with another. + """Return whether the coordinate is compatible with another. Compatibility is determined by comparing :meth:`iris.coords.Coord.name()`, :attr:`iris.coords.Coord.units`, :attr:`iris.coords.Coord.coord_system` and :attr:`iris.coords.Coord.attributes` that are present in both objects. - Args: - - * other: - An instance of :class:`iris.coords.Coord` or - :class:`iris.coords.CoordDefn`. - * ignore: + Parameters + ---------- + other : + An instance of :class:`iris.coords.Coord`, + :class:`iris.common.CoordMetadata` or + :class:`iris.common.DimCoordMetadata`. + ignore : optional A single attribute key or iterable of attribute keys to ignore when comparing the coordinates. Default is None. To ignore all attributes, set this to other.attributes. - Returns: - Boolean. 
+ Returns + ------- + bool """ - compatible = (self.name() == other.name() and - self.units == other.units and - self.coord_system == other.coord_system) - - if compatible: - common_keys = set(self.attributes).intersection(other.attributes) - if ignore is not None: - if isinstance(ignore, six.string_types): - ignore = (ignore,) - common_keys = common_keys.difference(ignore) - for key in common_keys: - if np.any(self.attributes[key] != other.attributes[key]): - compatible = False - break + compatible = False + if self.coord_system == other.coord_system: + compatible = super().is_compatible(other=other, ignore=ignore) return compatible - @property - def dtype(self): - """ - The NumPy dtype of the coord, as specified by its points. - - """ - return self._points_dm.dtype - @property def bounds_dtype(self): - """ + """The NumPy dtype of the coordinates bounds. + The NumPy dtype of the coord's bounds. Will be `None` if the coord does not have bounds. @@ -1288,21 +2061,9 @@ def bounds_dtype(self): result = self._bounds_dm.dtype return result - @property - def ndim(self): - """ - Return the number of dimensions of the coordinate (not including the - bounded dimension). - - """ - return self._points_dm.ndim - @property def nbounds(self): - """ - Return the number of bounds that this coordinate has (0 for no bounds). - - """ + """Return the number of bounds that this coordinate has (0 for no bounds).""" nbounds = 0 if self.has_bounds(): nbounds = self._bounds_dm.shape[-1] @@ -1312,26 +2073,25 @@ def has_bounds(self): """Return a boolean indicating whether the coord has a bounds array.""" return self._bounds_dm is not None - @property - def shape(self): - """The fundamental shape of the Coord, expressed as a tuple.""" - return self._points_dm.shape - def cell(self, index): - """ + """Point/bound cell at the given coordinate index. + Return the single :class:`Cell` instance which results from slicing the points/bounds with the given index. + """ index = iris.util._build_full_slice_given_keys(index, self.ndim) - point = tuple(np.array(self.points[index], ndmin=1).flatten()) + point = tuple(np.array(self.core_points()[index], ndmin=1).flatten()) if len(point) != 1: - raise IndexError('The index %s did not uniquely identify a single ' - 'point to create a cell with.' % (index, )) + raise IndexError( + "The index %s did not uniquely identify a single " + "point to create a cell with." % (index,) + ) bound = None if self.has_bounds(): - bound = tuple(np.array(self.bounds[index], ndmin=1).flatten()) + bound = tuple(np.array(self.core_bounds()[index], ndmin=1).flatten()) if self.units.is_time_reference(): point = self.units.num2date(point) @@ -1341,50 +2101,88 @@ def cell(self, index): return Cell(point, bound) def collapsed(self, dims_to_collapse=None): - """ - Returns a copy of this coordinate, which has been collapsed along - the specified dimensions. + """Return a copy of this coordinate, which has been collapsed along the specified dimensions. Replaces the points & bounds with a simple bounded region. """ - import dask.array as da # Ensure dims_to_collapse is a tuple to be able to pass # through to numpy if isinstance(dims_to_collapse, (int, np.integer)): - dims_to_collapse = (dims_to_collapse, ) + dims_to_collapse = (dims_to_collapse,) if isinstance(dims_to_collapse, list): dims_to_collapse = tuple(dims_to_collapse) if np.issubdtype(self.dtype, np.str_): # Collapse the coordinate by serializing the points and # bounds as strings. 
- def serialize(x): - return '|'.join([str(i) for i in x.flatten()]) + def serialize(x, axis): + if axis is None: + return "|".join(str(i) for i in x.flatten()) + + # np.apply_along_axis combined with str.join will truncate strings in + # some cases (https://github.com/numpy/numpy/issues/8352), so we need to + # loop through the array directly. First move (possibly multiple) axis + # of interest to trailing dim(s), then make a 2D array we can loop + # through. + work_array = np.moveaxis(x, axis, range(-len(axis), 0)) + out_shape = work_array.shape[: -len(axis)] + work_array = work_array.reshape(np.prod(out_shape, dtype=int), -1) + + joined = [] + for arr_slice in work_array: + joined.append(serialize(arr_slice, None)) + + return np.array(joined).reshape(out_shape) + bounds = None - string_type_fmt = 'S{}' if six.PY2 else 'U{}' if self.has_bounds(): - shape = self._bounds_dm.shape[1:] - bounds = [] - for index in np.ndindex(shape): - index_slice = (slice(None),) + tuple(index) - bounds.append(serialize(self.bounds[index_slice])) - dtype = np.dtype(string_type_fmt.format(max(map(len, bounds)))) - bounds = np.array(bounds, dtype=dtype).reshape((1,) + shape) - points = serialize(self.points) - dtype = np.dtype(string_type_fmt.format(len(points))) + # Express dims_to_collapse as non-negative integers. + if dims_to_collapse is None: + dims_to_collapse = range(self.ndim) + else: + dims_to_collapse = tuple( + dim % self.ndim for dim in dims_to_collapse + ) + bounds = serialize(self.bounds, dims_to_collapse) + + points = serialize(self.points, dims_to_collapse) # Create the new collapsed coordinate. - coord = self.copy(points=np.array(points, dtype=dtype), - bounds=bounds) + coord = self.copy(points=np.array(points), bounds=bounds) else: # Collapse the coordinate by calculating the bounded extremes. if self.ndim > 1: - msg = 'Collapsing a multi-dimensional coordinate. ' \ - 'Metadata may not be fully descriptive for {!r}.' - warnings.warn(msg.format(self.name())) - elif not self.is_contiguous(): - msg = 'Collapsing a non-contiguous coordinate. ' \ - 'Metadata may not be fully descriptive for {!r}.' - warnings.warn(msg.format(self.name())) + msg = ( + "Collapsing a multi-dimensional coordinate. " + "Metadata may not be fully descriptive for {!r}." + ) + warnings.warn( + msg.format(self.name()), + category=iris.warnings.IrisVagueMetadataWarning, + ) + else: + try: + self._sanity_check_bounds() + except ValueError as exc: + msg = ( + "Cannot check if coordinate is contiguous: {} " + "Metadata may not be fully descriptive for {!r}. " + "Ignoring bounds." + ) + warnings.warn( + msg.format(str(exc), self.name()), + category=iris.warnings.IrisVagueMetadataWarning, + ) + self.bounds = None + else: + if not self.is_contiguous(): + msg = ( + "Collapsing a non-contiguous coordinate. " + "Metadata may not be fully descriptive for {!r}." + ) + warnings.warn( + msg.format(self.name()), + category=iris.warnings.IrisVagueMetadataWarning, + ) if self.has_bounds(): item = self.core_bounds() @@ -1392,7 +2190,8 @@ def serialize(x): # Express main dims_to_collapse as non-negative integers # and add the last (bounds specific) dimension. 
dims_to_collapse = tuple( - dim % self.ndim for dim in dims_to_collapse) + (-1,) + dim % self.ndim for dim in dims_to_collapse + ) + (-1,) else: item = self.core_points() @@ -1400,27 +2199,40 @@ def serialize(x): al = da if _lazy.is_lazy_data(item) else np # Calculate the bounds and points along the right dims - bounds = al.stack([item.min(axis=dims_to_collapse), - item.max(axis=dims_to_collapse)], axis=-1) + bounds = al.stack( + [ + item.min(axis=dims_to_collapse), + item.max(axis=dims_to_collapse), + ], + axis=-1, + ) points = al.array(bounds.sum(axis=-1) * 0.5, dtype=self.dtype) # Create the new collapsed coordinate. coord = self.copy(points=points, bounds=bounds) return coord - def _guess_bounds(self, bound_position=0.5): - """ - Return bounds for this coordinate based on its points. - - Kwargs: + def _guess_bounds(self, bound_position=0.5, monthly=False, yearly=False): + """Return bounds for this coordinate based on its points. - * bound_position: + Parameters + ---------- + bound_position : float, default=0.5 The desired position of the bounds relative to the position of the points. - - Returns: - A numpy array of shape (len(self.points), 2). - + monthly : bool, default=False + If True, the coordinate must be monthly and bounds are set to the + start and ends of each month. + yearly : bool, default=False + If True, the coordinate must be yearly and bounds are set to the + start and ends of each year. + + Returns + ------- + A numpy array of shape (len(self.points), 2). + + Notes + ----- .. note:: This method only works for coordinates with ``coord.ndim == 1``. @@ -1429,48 +2241,97 @@ def _guess_bounds(self, bound_position=0.5): # XXX Consider moving into DimCoord # ensure we have monotonic points if not self.is_monotonic(): - raise ValueError("Need monotonic points to generate bounds for %s" - % self.name()) + raise ValueError( + "Need monotonic points to generate bounds for %s" % self.name() + ) if self.ndim != 1: raise iris.exceptions.CoordinateMultiDimError(self) - if self.shape[0] < 2: - raise ValueError('Cannot guess bounds for a coordinate of length ' - '1.') + if not monthly and self.shape[0] < 2: + raise ValueError("Cannot guess bounds for a coordinate of length 1.") if self.has_bounds(): - raise ValueError('Coord already has bounds. Remove the bounds ' - 'before guessing new ones.') - - if getattr(self, 'circular', False): - points = np.empty(self.shape[0] + 2) - points[1:-1] = self.points - direction = 1 if self.points[-1] > self.points[0] else -1 - points[0] = self.points[-1] - (self.units.modulus * direction) - points[-1] = self.points[0] + (self.units.modulus * direction) - diffs = np.diff(points) + raise ValueError( + "Coord already has bounds. Remove the bounds " + "before guessing new ones." + ) + + if monthly or yearly: + if monthly and yearly: + raise ValueError( + "Cannot guess monthly and yearly bounds simultaneously." + ) + dates = self.units.num2date(self.points) + lower_bounds = [] + upper_bounds = [] + months_and_years = [] + if monthly: + for date in dates: + if date.month == 12: + lyear = date.year + uyear = date.year + 1 + lmonth = 12 + umonth = 1 + else: + lyear = uyear = date.year + lmonth = date.month + umonth = date.month + 1 + date_pair = (date.year, date.month) + if date_pair not in months_and_years: + months_and_years.append(date_pair) + else: + raise ValueError( + "Cannot guess monthly bounds for a coordinate with multiple " + "points in a month." 
+ ) + lower_bounds.append(date.__class__(lyear, lmonth, 1, 0, 0)) + upper_bounds.append(date.__class__(uyear, umonth, 1, 0, 0)) + elif yearly: + for date in dates: + year = date.year + if year not in months_and_years: + months_and_years.append(year) + else: + raise ValueError( + "Cannot guess yearly bounds for a coordinate with multiple " + "points in a year." + ) + lower_bounds.append(date.__class__(date.year, 1, 1, 0, 0)) + upper_bounds.append(date.__class__(date.year + 1, 1, 1, 0, 0)) + bounds = self.units.date2num(np.array([lower_bounds, upper_bounds]).T) + contiguous = np.ma.allclose(bounds[1:, 0], bounds[:-1, 1]) + if not contiguous: + raise ValueError("Cannot guess bounds for a non-contiguous coordinate.") + + # if not monthly or yearly else: - diffs = np.diff(self.points) - diffs = np.insert(diffs, 0, diffs[0]) - diffs = np.append(diffs, diffs[-1]) + if getattr(self, "circular", False): + points = np.empty(self.shape[0] + 2) + points[1:-1] = self.points + direction = 1 if self.points[-1] > self.points[0] else -1 + points[0] = self.points[-1] - (self.units.modulus * direction) + points[-1] = self.points[0] + (self.units.modulus * direction) + diffs = np.diff(points) + else: + diffs = np.diff(self.points) + diffs = np.insert(diffs, 0, diffs[0]) + diffs = np.append(diffs, diffs[-1]) - min_bounds = self.points - diffs[:-1] * bound_position - max_bounds = self.points + diffs[1:] * (1 - bound_position) + min_bounds = self.points - diffs[:-1] * bound_position + max_bounds = self.points + diffs[1:] * (1 - bound_position) - bounds = np.array([min_bounds, max_bounds]).transpose() + bounds = np.array([min_bounds, max_bounds]).transpose() - if (self.name() in ('latitude', 'grid_latitude') and - self.units == 'degree'): - points = self.points - if (points >= -90).all() and (points <= 90).all(): - np.clip(bounds, -90, 90, out=bounds) + if self.name() in ("latitude", "grid_latitude") and self.units == "degree": + points = self.points + if (points >= -90).all() and (points <= 90).all(): + np.clip(bounds, -90, 90, out=bounds) return bounds - def guess_bounds(self, bound_position=0.5): - """ - Add contiguous bounds to a coordinate, calculated from its points. + def guess_bounds(self, bound_position=0.5, monthly=False, yearly=False): + """Add contiguous bounds to a coordinate, calculated from its points. Puts a cell boundary at the specified fraction between each point and the next, plus extrapolated lowermost and uppermost bound points, so @@ -1481,12 +2342,21 @@ def guess_bounds(self, bound_position=0.5): With irregular points, the first and last cells are given the same widths as the ones next to them. - Kwargs: - - * bound_position: + Parameters + ---------- + bound_position : float, default=0.5 The desired position of the bounds relative to the position of the points. + monthly : bool, default=False + If True, the coordinate must be monthly and bounds are set to the + start and ends of each month. + yearly : bool, default=False + If True, the coordinate must be yearly and bounds are set to the + start and ends of each year. + + Notes + ----- .. note:: An error is raised if the coordinate already has bounds, is not @@ -1498,44 +2368,50 @@ def guess_bounds(self, bound_position=0.5): produce unexpected results : In such cases you should assign suitable values directly to the bounds property, instead. + .. note:: + + Monthly and Yearly work differently from the standard case. They + can work for single points but cannot be used together. 
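# --- Editor's illustrative sketch (not part of the patch) --------------------
# guess_bounds() with the default bound_position=0.5, as described above:
# cell boundaries are placed midway between points and extrapolated at the
# ends. The coordinate values are arbitrary.
import numpy as np
from iris.coords import DimCoord

coord = DimCoord(np.array([1.0, 2.0, 3.0]), long_name="x")
coord.guess_bounds()
# coord.bounds is now [[0.5, 1.5], [1.5, 2.5], [2.5, 3.5]]
# ------------------------------------------------------------------------------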
+ + """ - self.bounds = self._guess_bounds(bound_position) + self.bounds = self._guess_bounds(bound_position, monthly, yearly) def intersect(self, other, return_indices=False): - """ - Returns a new coordinate from the intersection of two coordinates. + """Return a new coordinate from the intersection of two coordinates. Both coordinates must be compatible as defined by :meth:`~iris.coords.Coord.is_compatible`. - Kwargs: - - * return_indices: + Parameters + ---------- + return_indices : bool, default=False If True, changes the return behaviour to return the intersection indices for the "self" coordinate. """ if not self.is_compatible(other): - msg = 'The coordinates cannot be intersected. They are not ' \ - 'compatible because of differing metadata.' + msg = ( + "The coordinates cannot be intersected. They are not " + "compatible because of differing metadata." + ) raise ValueError(msg) - # Cache self.cells for speed. We can also use the index operation on a - # list conveniently. - self_cells = [cell for cell in self.cells()] + # Cache self.cells for speed. We can also use the dict for fast index + # lookup. + self_cells = {cell: idx for idx, cell in enumerate(self.cells())} # Maintain a list of indices on self for which cells exist in both self # and other. self_intersect_indices = [] for cell in other.cells(): - try: - self_intersect_indices.append(self_cells.index(cell)) - except ValueError: - pass + if cell in self_cells: + self_intersect_indices.append(self_cells[cell]) if return_indices is False and self_intersect_indices == []: - raise ValueError('No intersection between %s coords possible.' % - self.name()) + raise ValueError( + "No intersection between %s coords possible." % self.name() + ) self_intersect_indices = np.array(self_intersect_indices) @@ -1547,8 +2423,7 @@ def intersect(self, other, return_indices=False): return self[self_intersect_indices] def nearest_neighbour_index(self, point): - """ - Returns the index of the cell nearest to the given point. + """Return the index of the cell nearest to the given point. Only works for one-dimensional coordinates. @@ -1570,9 +2445,11 @@ def nearest_neighbour_index(self, point): points = self.points bounds = self.bounds if self.has_bounds() else np.array([]) if self.ndim != 1: - raise ValueError('Nearest-neighbour is currently limited' - ' to one-dimensional coordinates.') - do_circular = getattr(self, 'circular', False) + raise ValueError( + "Nearest-neighbour is currently limited" + " to one-dimensional coordinates." + ) + do_circular = getattr(self, "circular", False) if do_circular: wrap_modulus = self.units.modulus # wrap 'point' to a range based on lowest points or bounds value. 
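# --- Editor's illustrative sketch (not part of the patch) --------------------
# nearest_neighbour_index() on a simple unbounded 1-D coordinate: the index
# of the point closest to the supplied value is returned. Values arbitrary.
import numpy as np
from iris.coords import DimCoord

coord = DimCoord(np.array([0.0, 10.0, 20.0]), long_name="x")
assert coord.nearest_neighbour_index(12.0) == 1   # 12.0 is nearest to 10.0
# ------------------------------------------------------------------------------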
@@ -1590,7 +2467,9 @@ def nearest_neighbour_index(self, point): if self.has_bounds(): # make bounds ranges complete+separate, so point is in at least one increasing = self.bounds[0, 1] > self.bounds[0, 0] - bounds = bounds.copy() + # identify data type that bounds and point can safely cast to + dtype = np.result_type(bounds, point) + bounds = bounds.astype(dtype) # sort the bounds cells by their centre values sort_inds = np.argsort(np.mean(bounds, axis=1)) bounds = bounds[sort_inds] @@ -1608,8 +2487,10 @@ def nearest_neighbour_index(self, point): bounds[0, 0] = min(point, bounds[0, 0]) bounds[-1, 1] = max(point, bounds[-1, 1]) # get index of first-occurring cell that contains the point - inside_cells = np.logical_and(point >= np.min(bounds, axis=1), - point <= np.max(bounds, axis=1)) + inside_cells = np.logical_and( + point >= np.min(bounds, axis=1), + point <= np.max(bounds, axis=1), + ) result_index = np.where(inside_cells)[0][0] # return the original index of the cell (before the bounds sort) result_index = sort_inds[result_index] @@ -1637,137 +2518,86 @@ def nearest_neighbour_index(self, point): return result_index def xml_element(self, doc): - """Return a DOM element describing this Coord.""" - # Create the XML element as the camelCaseEquivalent of the - # class name. - element_name = type(self).__name__ - element_name = element_name[0].lower() + element_name[1:] - element = doc.createElement(element_name) - - element.setAttribute('id', self._xml_id()) - - if self.standard_name: - element.setAttribute('standard_name', str(self.standard_name)) - if self.long_name: - element.setAttribute('long_name', str(self.long_name)) - if self.var_name: - element.setAttribute('var_name', str(self.var_name)) - element.setAttribute('units', repr(self.units)) - if self.climatological: - element.setAttribute('climatological', str(self.climatological)) + """Create the :class:`xml.dom.minidom.Element` that describes this :class:`Coord`. - if self.attributes: - attributes_element = doc.createElement('attributes') - for name in sorted(six.iterkeys(self.attributes)): - attribute_element = doc.createElement('attribute') - attribute_element.setAttribute('name', name) - attribute_element.setAttribute('value', - str(self.attributes[name])) - attributes_element.appendChild(attribute_element) - element.appendChild(attributes_element) + Parameters + ---------- + doc : + The parent :class:`xml.dom.minidom.Document`. - # Add a coord system sub-element? - if self.coord_system: - element.appendChild(self.coord_system.xml_element(doc)) + Returns + ------- + :class:`xml.dom.minidom.Element` + The :class:`xml.dom.minidom.Element` that will describe this + :class:`DimCoord`. - # Add the values - element.setAttribute('value_type', str(self._value_type_name())) - element.setAttribute('shape', str(self.shape)) - if hasattr(self.points, 'to_xml_attr'): - element.setAttribute('points', self.points.to_xml_attr()) - else: - element.setAttribute('points', iris.util.format_array(self.points)) + """ + # Create the XML element as the camelCaseEquivalent of the + # class name + element = super().xml_element(doc=doc) + # Add bounds, points are handled by the parent class. 
if self.has_bounds(): - if hasattr(self.bounds, 'to_xml_attr'): - element.setAttribute('bounds', self.bounds.to_xml_attr()) - else: - element.setAttribute('bounds', - iris.util.format_array(self.bounds)) + element.setAttribute("bounds", self._xml_array_repr(self.bounds)) return element - def _xml_id(self): - # Returns a consistent, unique string identifier for this coordinate. - unique_value = b'' - if self.standard_name: - unique_value += self.standard_name.encode('utf-8') - unique_value += b'\0' - if self.long_name: - unique_value += self.long_name.encode('utf-8') - unique_value += b'\0' - unique_value += str(self.units).encode('utf-8') + b'\0' - for k, v in sorted(self.attributes.items()): - unique_value += (str(k) + ':' + str(v)).encode('utf-8') + b'\0' - unique_value += str(self.coord_system).encode('utf-8') + b'\0' - # Mask to ensure consistency across Python versions & platforms. - crc = zlib.crc32(unique_value) & 0xffffffff - return '%08x' % (crc, ) + def _xml_id_extra(self, unique_value): + """Coord specific stuff for the xml id.""" + unique_value += str(self.coord_system).encode("utf-8") + b"\0" + return unique_value - def _value_type_name(self): - """ - A simple, readable name for the data type of the Coord point/bound - values. - """ - dtype = self.core_points().dtype - kind = dtype.kind - if kind in 'SU': - # Establish the basic type name for 'string' type data. - # N.B. this means "unicode" in Python3, and "str" in Python2. - value_type_name = 'string' +_regular_points = lru_cache(iris.util.regular_points) +"""Caching version of iris.util.regular_points""" - # Override this if not the 'native' string type. - if six.PY3: - if kind == 'S': - value_type_name = 'bytes' - else: - if kind == 'U': - value_type_name = 'unicode' - else: - value_type_name = dtype.name - return value_type_name +class DimCoord(Coord): + """A coordinate that is 1D, and numeric. + With values that have a strict monotonic ordering. Missing values are not + permitted in a :class:`DimCoord`. -class DimCoord(Coord): """ - A coordinate that is 1D, numeric, and strictly monotonic. - """ @classmethod - def from_regular(cls, zeroth, step, count, standard_name=None, - long_name=None, var_name=None, units='1', attributes=None, - coord_system=None, circular=False, with_bounds=False): - """ - Create a :class:`DimCoord` with regularly spaced points, and - optionally bounds. + def from_regular( + cls, + zeroth, + step, + count, + standard_name=None, + long_name=None, + var_name=None, + units=None, + attributes=None, + coord_system=None, + circular=False, + climatological=False, + with_bounds=False, + ): + """Create a :class:`DimCoord` with regularly spaced points, and optionally bounds. The majority of the arguments are defined as for - :meth:`Coord.__init__`, but those which differ are defined below. + :class:`Coord`, but those which differ are defined below. - Args: - - * zeroth: + Parameters + ---------- + zeroth : The value *prior* to the first point value. - * step: + step : The numeric difference between successive point values. - * count: + count : The number of point values. - - Kwargs: - - * with_bounds: + with_bounds : bool, default=False If True, the resulting DimCoord will possess bound values which are equally spaced around the points. Otherwise no bounds values will be defined. Defaults to False. 
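+
+        Examples
+        --------
+        A minimal sketch; the argument values here are purely illustrative::
+
+            coord = DimCoord.from_regular(zeroth=0.0, step=2.0, count=4,
+                                          long_name="level")
+            print(coord.points)     # [2. 4. 6. 8.]
+            coord.points[0] = -1.0  # raises ValueError: the array is read-only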
""" - points = (zeroth + step) + step * np.arange(count, dtype=np.float32) - _, regular = points_step(points) - if not regular: - points = (zeroth + step) + step * np.arange(count, - dtype=np.float64) + # Use lru_cache because this is done repeatedly with the same arguments + # (particularly in field-based file loading). + points = _regular_points(zeroth, step, count).copy() points.flags.writeable = False if with_bounds: @@ -1777,60 +2607,136 @@ def from_regular(cls, zeroth, step, count, standard_name=None, else: bounds = None - return cls(points, standard_name=standard_name, - long_name=long_name, var_name=var_name, units=units, - bounds=bounds, attributes=attributes, - coord_system=coord_system, circular=circular) - - def __init__(self, points, standard_name=None, long_name=None, - var_name=None, units='1', bounds=None, - attributes=None, coord_system=None, circular=False, - climatological=False): + return cls( + points, + standard_name=standard_name, + long_name=long_name, + var_name=var_name, + units=units, + bounds=bounds, + attributes=attributes, + coord_system=coord_system, + circular=circular, + climatological=climatological, + ) + + def __init__( + self, + points, + standard_name=None, + long_name=None, + var_name=None, + units=None, + bounds=None, + attributes=None, + coord_system=None, + circular=False, + climatological=False, + ): + """Create a 1D, numeric, and strictly monotonic coordinate with **immutable** points and bounds. + + Missing values are not permitted. + + Parameters + ---------- + points : + 1D numpy array-like of values (or single value in the case of a + scalar coordinate) for each cell of the coordinate. The values + must be strictly monotonic and masked values are not allowed. + standard_name : optional + CF standard name of the coordinate. + long_name : optional + Descriptive name of the coordinate. + var_name : optional + The netCDF variable name for the coordinate. + units : :class:`~cf_units.Unit`, optional + The :class:`~cf_units.Unit` of the coordinate's values. + Can be a string, which will be converted to a Unit object. + bounds : optional + An array of values describing the bounds of each cell. Given n + bounds and m cells, the shape of the bounds array should be + (m, n). For each bound, the values must be strictly monotonic along + the cells, and the direction of monotonicity must be consistent + across the bounds. For example, a DimCoord with 100 points and two + bounds per cell would have a bounds array of shape (100, 2), and + the slices ``bounds[:, 0]`` and ``bounds[:, 1]`` would be monotonic + in the same direction. Masked values are not allowed. + Note if the data is a climatology, `climatological` + should be set. + attributes : optional + A dictionary containing other CF and user-defined attributes. + coord_system : :class:`~iris.coord_systems.CoordSystem`, optional + A :class:`~iris.coord_systems.CoordSystem` representing the + coordinate system of the coordinate, + e.g., a :class:`~iris.coord_systems.GeogCS` for a longitude coordinate. + circular : bool, default=False + Whether the coordinate wraps by the :attr:`~iris.coords.DimCoord.units.modulus` + i.e., the longitude coordinate wraps around the full great circle. + climatological : bool, default=False + When True: the coordinate is a NetCDF climatological time axis. + When True: saving in NetCDF will give the coordinate variable a + 'climatology' attribute and will create a boundary variable called + '_climatology' in place of a standard bounds + attribute and bounds variable. 
+ Will set to True when a climatological time axis is loaded + from NetCDF. + Always False if no bounds exist. """ - Create a 1D, numeric, and strictly monotonic :class:`Coord` with - read-only points and bounds. + # Configure the metadata manager. + self._metadata_manager = metadata_manager_factory(DimCoordMetadata) - """ - super(DimCoord, self).__init__( - points, standard_name=standard_name, - long_name=long_name, var_name=var_name, - units=units, bounds=bounds, + super().__init__( + points, + standard_name=standard_name, + long_name=long_name, + var_name=var_name, + units=units, + bounds=bounds, attributes=attributes, coord_system=coord_system, - climatological=climatological) + climatological=climatological, + ) #: Whether the coordinate wraps by ``coord.units.modulus``. - self.circular = bool(circular) + self.circular = circular - def __deepcopy__(self, memo): - """ - coord.__deepcopy__() -> Deep copy of coordinate. + def __deepcopy__(self, memo): # numpydoc ignore=SS02 + """coord.__deepcopy__() -> Deep copy of coordinate. Used if copy.deepcopy is called on a coordinate. """ - new_coord = copy.deepcopy(super(DimCoord, self), memo) + new_coord = copy.deepcopy(super(), memo) # Ensure points and bounds arrays are read-only. - new_coord._points_dm.data.flags.writeable = False + new_coord._values_dm.data.flags.writeable = False if new_coord._bounds_dm is not None: new_coord._bounds_dm.data.flags.writeable = False return new_coord + @property + def circular(self): + return self._metadata_manager.circular + + @circular.setter + def circular(self, circular): + self._metadata_manager.circular = bool(circular) + def copy(self, points=None, bounds=None): - new_coord = super(DimCoord, self).copy(points=points, bounds=bounds) + new_coord = super().copy(points=points, bounds=bounds) # Make the arrays read-only. - new_coord._points_dm.data.flags.writeable = False + new_coord._values_dm.data.flags.writeable = False if bounds is not None: new_coord._bounds_dm.data.flags.writeable = False return new_coord def __eq__(self, other): - # TODO investigate equality of AuxCoord and DimCoord if circular is - # False. result = NotImplemented if isinstance(other, DimCoord): - result = (Coord.__eq__(self, other) and self.circular == - other.circular) + # The "circular" member participates in DimCoord to DimCoord + # equivalence. We require to do this explicitly here + # as the "circular" member does NOT participate in + # DimCoordMetadata to DimCoordMetadata equivalence. + result = self.circular == other.circular and super().__eq__(other) return result # The __ne__ operator from Coord implements the not __eq__ method. @@ -1844,7 +2750,7 @@ def __eq__(self, other): __hash__ = Coord.__hash__ def __getitem__(self, key): - coord = super(DimCoord, self).__getitem__(key) + coord = super().__getitem__(key) coord.circular = self.circular and coord.shape == self.shape return coord @@ -1854,43 +2760,43 @@ def collapsed(self, dims_to_collapse=None): bnds = coord.bounds.copy() bnds[0, 1] = coord.bounds[0, 0] + self.units.modulus coord.bounds = bnds - coord.points = np.array(np.sum(coord.bounds) * 0.5, - dtype=self.points.dtype) + coord.points = np.array(np.sum(coord.bounds) * 0.5, dtype=self.points.dtype) # XXX This isn't actually correct, but is ported from the old world. 
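+        # Note: the collapsed cell spans the coordinate's full extent (for a
+        # circular coordinate, the whole modulus range), so the result no
+        # longer wraps and 'circular' is reset below.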
coord.circular = False return coord - def _repr_other_metadata(self): - result = Coord._repr_other_metadata(self) - if self.circular: - result += ', circular=%r' % self.circular - return result - def _new_points_requirements(self, points): - """ + """Confirm that a new set of coord points adheres to the requirements. + Confirm that a new set of coord points adheres to the requirements for :class:`~iris.coords.DimCoord` points, being: - * points are scalar or 1D, - * points are numeric, - * points are not masked, and - * points are monotonic. + + * points are scalar or 1D, + * points are numeric, + * points are not masked, and + * points are monotonic. """ if points.ndim not in (0, 1): - emsg = 'The {!r} {} points array must be scalar or 1-dimensional.' + emsg = "The {!r} {} points array must be scalar or 1-dimensional." raise ValueError(emsg.format(self.name(), self.__class__.__name__)) if not np.issubdtype(points.dtype, np.number): - emsg = 'The {!r} {} points array must be numeric.' + emsg = "The {!r} {} points array must be numeric." raise ValueError(emsg.format(self.name(), self.__class__.__name__)) if ma.is_masked(points): - emsg = 'A {!r} {} points array must not be masked.' + emsg = "A {!r} {} points array must not be masked." raise TypeError(emsg.format(self.name(), self.__class__.__name__)) if points.size > 1 and not iris.util.monotonic(points, strict=True): - emsg = 'The {!r} {} points array must be strictly monotonic.' + emsg = "The {!r} {} points array must be strictly monotonic." raise ValueError(emsg.format(self.name(), self.__class__.__name__)) - @Coord.points.setter - def points(self, points): + @property + def _values(self): + # Overridden just to allow .setter override. + return super()._values + + @_values.setter + def _values(self, points): # DimCoord always realises the points, to allow monotonicity checks. # Ensure it is an actual array, and also make our own copy so that we # can make it read-only. @@ -1903,40 +2809,48 @@ def points(self, points): # Cast to a numpy array for masked arrays with no mask. points = np.array(points) - # Call the parent points setter. - super(DimCoord, self.__class__).points.fset(self, points) + super(DimCoord, self.__class__)._values.fset(self, points) - if self._points_dm is not None: + if self._values_dm is not None: # Re-fetch the core array, as the super call may replace it. - points = self._points_dm.core_data() + points = self._values_dm.core_data() # N.B. always a *real* array, as we realised 'points' at the start. # Make the array read-only. points.flags.writeable = False def _new_bounds_requirements(self, bounds): - """ + """Confirm that a new set of coord bounds adheres to the requirements. + Confirm that a new set of coord bounds adheres to the requirements for :class:`~iris.coords.DimCoord` bounds, being: - * bounds are compatible in shape with the points - * bounds are numeric, - * bounds are not masked, and - * bounds are monotonic in the first dimension. + + * bounds are compatible in shape with the points + * bounds are numeric, + * bounds are not masked, and + * bounds are monotonic in the first dimension. + + Also reverse the order of the second dimension if necessary to match the + first dimension's direction. I.e. both should increase or both should + decrease. """ # Ensure the bounds are a compatible shape. 
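+        # For example, 100 points with two bounds per cell requires a bounds
+        # array of shape (100, 2), i.e. points.shape + (n_bounds,).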
-        if self.shape != bounds.shape[:-1] and \
-                not (self.shape == (1,) and bounds.ndim == 1):
-            emsg = ('The shape of the {!r} {} bounds array should be '
-                    'points.shape + (n_bounds)')
+        if self.shape != bounds.shape[:-1] and not (
+            self.shape == (1,) and bounds.ndim == 1
+        ):
+            emsg = (
+                "The shape of the {!r} {} bounds array should be "
+                "points.shape + (n_bounds)"
+            )
             raise ValueError(emsg.format(self.name(), self.__class__.__name__))

         # Checks for numeric.
         if not np.issubdtype(bounds.dtype, np.number):
-            emsg = 'The {!r} {} bounds array must be numeric.'
+            emsg = "The {!r} {} bounds array must be numeric."
             raise ValueError(emsg.format(self.name(), self.__class__.__name__))

         # Check not masked.
         if ma.is_masked(bounds):
-            emsg = 'A {!r} {} bounds array must not be masked.'
+            emsg = "A {!r} {} bounds array must not be masked."
             raise TypeError(emsg.format(self.name(), self.__class__.__name__))

         # Check bounds are monotonic.
@@ -1944,25 +2858,42 @@ def _new_bounds_requirements(self, bounds):
             n_bounds = bounds.shape[-1]
             n_points = bounds.shape[0]
             if n_points > 1:
                 directions = set()
                 for b_index in range(n_bounds):
                     monotonic, direction = iris.util.monotonic(
-                        bounds[:, b_index], strict=True, return_direction=True)
+                        bounds[:, b_index], strict=True, return_direction=True
+                    )
                     if not monotonic:
-                        emsg = ('The {!r} {} bounds array must be strictly '
-                                'monotonic.')
-                        raise ValueError(emsg.format(self.name(),
-                                                     self.__class__.__name__))
+                        emsg = "The {!r} {} bounds array must be strictly monotonic."
+                        raise ValueError(
+                            emsg.format(self.name(), self.__class__.__name__)
+                        )
                     directions.add(direction)

                 if len(directions) != 1:
-                    emsg = ('The direction of monotonicity for {!r} {} must '
-                            'be consistent across all bounds.')
-                    raise ValueError(emsg.format(self.name(),
-                                                 self.__class__.__name__))
+                    emsg = (
+                        "The direction of monotonicity for {!r} {} must "
+                        "be consistent across all bounds."
+                    )
+                    raise ValueError(emsg.format(self.name(), self.__class__.__name__))
+
+                if n_bounds == 2:
+                    # Make ordering of bounds consistent with coord's direction
+                    # if possible, e.g. bounds of [[1.5, 0.5], [2.5, 1.5]] on an
+                    # increasing coord are flipped to [[0.5, 1.5], [1.5, 2.5]].
+                    (direction,) = directions
+                    diffs = bounds[:, 0] - bounds[:, 1]
+                    if np.all(np.sign(diffs) == direction):
+                        bounds = np.flip(bounds, axis=1)
+
+        return bounds

-    @Coord.bounds.setter
+    @property
+    def bounds(self):
+        # Overridden just to allow .setter override.
+        return super().bounds
+
+    @bounds.setter
     def bounds(self, bounds):
         if bounds is not None:
             # Ensure we have a realised array of new bounds values.
@@ -1970,8 +2900,9 @@ def bounds(self, bounds):
             # Make sure we have an array (any type of array).
             bounds = np.asanyarray(bounds)

-            # Check validity requirements for dimension-coordinate bounds.
-            self._new_bounds_requirements(bounds)
+            # Check validity requirements for dimension-coordinate bounds and
+            # reverse the trailing dimension if necessary.
+            bounds = self._new_bounds_requirements(bounds)

             # Cast to a numpy array for masked arrays with no mask.
             bounds = np.array(bounds)
@@ -1990,23 +2921,73 @@ def is_monotonic(self):
         return True

     def xml_element(self, doc):
-        """Return DOM element describing this :class:`iris.coords.DimCoord`."""
-        element = super(DimCoord, self).xml_element(doc)
+        """Create the :class:`xml.dom.minidom.Element` that describes this :class:`DimCoord`.
+
+        Parameters
+        ----------
+        doc :
+            The parent :class:`xml.dom.minidom.Document`.
+
+        Returns
+        -------
+        :class:`xml.dom.minidom.Element`
+            The :class:`xml.dom.minidom.Element` that describes this
+            :class:`DimCoord`.
+ + """ + element = super().xml_element(doc) if self.circular: - element.setAttribute('circular', str(self.circular)) + element.setAttribute("circular", str(self.circular)) return element class AuxCoord(Coord): - """ - A CF auxiliary coordinate. + """A CF auxiliary coordinate.""" + + def __init__(self, *args, **kwargs): + """Create a coordinate with **mutable** points and bounds. - .. note:: + Parameters + ---------- + points : + The values (or value in the case of a scalar coordinate) for each + cell of the coordinate. + standard_name : optional + CF standard name of the coordinate. + long_name : optional + Descriptive name of the coordinate. + var_name : optional + The netCDF variable name for the coordinate. + unit : :class:`~cf_units.Unit`, optional + The :class:`~cf_units.Unit` of the coordinate's values. + Can be a string, which will be converted to a Unit object. + bounds : optional + An array of values describing the bounds of each cell. Given n + bounds for each cell, the shape of the bounds array should be + points.shape + (n,). For example, a 1D coordinate with 100 points + and two bounds per cell would have a bounds array of shape + (100, 2) + Note if the data is a climatology, `climatological` + should be set. + attributes : optional + A dictionary containing other CF and user-defined attributes. + coord_system : :class:`~iris.coord_systems.CoordSystem`, optional + A :class:`~iris.coord_systems.CoordSystem` representing the + coordinate system of the coordinate, + e.g., a :class:`~iris.coord_systems.GeogCS` for a longitude coordinate. + climatological bool, optional + When True: the coordinate is a NetCDF climatological time axis. + When True: saving in NetCDF will give the coordinate variable a + 'climatology' attribute and will create a boundary variable called + '_climatology' in place of a standard bounds + attribute and bounds variable. + Will set to True when a climatological time axis is loaded + from NetCDF. + Always False if no bounds exist. - There are currently no specific properties of :class:`AuxCoord`, - everything is inherited from :class:`Coord`. + """ + super().__init__(*args, **kwargs) - """ # Logically, :class:`Coord` is an abstract class and all actual coords must # be members of some concrete subclass, i.e. an :class:`AuxCoord` or # a :class:`DimCoord`. @@ -2015,206 +2996,11 @@ class AuxCoord(Coord): # AuxCoord-specific code if needed in future. -class CellMeasure(six.with_metaclass(ABCMeta, CFVariableMixin)): - """ - A CF Cell Measure, providing area or volume properties of a cell - where these cannot be inferred from the Coordinates and - Coordinate Reference System. - - """ - - def __init__(self, data, standard_name=None, long_name=None, - var_name=None, units='1', attributes=None, measure=None): - - """ - Constructs a single cell measure. - - Args: - - * data: - The values of the measure for each cell. - Either a 'real' array (:class:`numpy.ndarray`) or a 'lazy' array - (:class:`dask.array.Array`). - - Kwargs: - - * standard_name: - CF standard name of the cell measure. - * long_name: - Descriptive name of the cell measure. - * var_name: - The netCDF variable name for the cell measure. - * units - The :class:`~cf_units.Unit` of the cell measure's values. - Can be a string, which will be converted to a Unit object. - * attributes - A dictionary containing other CF and user-defined attributes. - * measure - A string describing the type of measure. 'area' and 'volume' - are the only valid entries. 
- - """ - #: CF standard name of the quantity that the cell measure represents. - self.standard_name = standard_name - - #: Descriptive name of the cell measure. - self.long_name = long_name - - #: The netCDF variable name for the cell measure. - self.var_name = var_name - - #: Unit of the quantity that the cell measure represents. - self.units = units - - #: Other attributes, including user specified attributes that - #: have no meaning to Iris. - self.attributes = attributes - - #: String naming the measure type. - self.measure = measure - - # Initialise data via the data setter code, which applies standard - # checks and ajustments. - self.data = data - - @property - def measure(self): - return self._measure - - @property - def data(self): - """Property containing the data values as a numpy array""" - return self._data_manager.data - - @data.setter - def data(self, data): - # Set the data to a new array - as long as it's the same shape. - # If data are already defined for this CellMeasure, - if data is None: - raise ValueError('The data payload of a CellMeasure may not be ' - 'None; it must be a numpy array or equivalent.') - if data.shape == (): - # If we have a scalar value, promote the shape from () to (1,). - # NOTE: this way also *realises* it. Don't think that matters. - data = np.array(data, ndmin=1) - if hasattr(self, '_data_manager') and self._data_manager is not None: - # Check that setting these data wouldn't change self.shape - if data.shape != self.shape: - raise ValueError("New data shape must match existing data " - "shape.") - - self._data_manager = DataManager(data) - - @property - def shape(self): - """Returns the shape of the Cell Measure, expressed as a tuple.""" - return self._data_manager.shape - - @property - def ndim(self): - """Returns the number of dimensions of the cell measure.""" - return self._data_manager.ndim - - @measure.setter - def measure(self, measure): - if measure not in ['area', 'volume']: - raise ValueError("measure must be 'area' or 'volume', " - "not {}".format(measure)) - self._measure = measure - - def __getitem__(self, keys): - """ - Returns a new CellMeasure whose values are obtained by - conventional array indexing. - - """ - # Get the data, all or part of which will become the new data. - data = self._data_manager.core_data() - - # Index data with the keys. - # Note: does not copy data unless it has to. - _, data = iris.util._slice_data_with_keys(data, keys) - - # Always copy data, to avoid making the new measure a view onto the old - # one. - data = data.copy() - - # The result is a copy with replacement data. - return self.copy(data=data) - - def copy(self, data=None): - """ - Returns a copy of this CellMeasure. - - Kwargs: - - * data: A data array for the new cell_measure. - This may be a different shape to the data of the - cell_measure being copied. - - """ - new_cell_measure = copy.deepcopy(self) - if data is not None: - # Remove the existing data manager, to prevent the data setter - # checking against existing content. - new_cell_measure._data_manager = None - # Set new data via the data setter code, which applies standard - # checks and ajustments. 
-        new_cell_measure.data = data
-
-        return new_cell_measure
-
-    def _repr_other_metadata(self):
-        fmt = ''
-        if self.long_name:
-            fmt = ', long_name={self.long_name!r}'
-        if self.var_name:
-            fmt += ', var_name={self.var_name!r}'
-        if len(self.attributes) > 0:
-            fmt += ', attributes={self.attributes}'
-        result = fmt.format(self=self)
-        return result
-
-    def __str__(self):
-        result = repr(self)
-        return result
-
-    def __repr__(self):
-        fmt = ('{cls}({self.data!r}'
-               ', measure={self.measure}, standard_name={self.standard_name!r}'
-               ', units={self.units!r}{other_metadata})')
-        result = fmt.format(self=self, cls=type(self).__name__,
-                            other_metadata=self._repr_other_metadata())
-        return result
-
-    def _as_defn(self):
-        defn = (self.standard_name, self.long_name, self.var_name,
-                self.units, self.attributes, self.measure)
-        return defn
-
-    def __eq__(self, other):
-        eq = NotImplemented
-        if isinstance(other, CellMeasure):
-            eq = self._as_defn() == other._as_defn()
-            if eq:
-                eq = (self.data == other.data).all()
-        return eq
-
-    def __ne__(self, other):
-        result = self.__eq__(other)
-        if result is not NotImplemented:
-            result = not result
-        return result
-
-
 class CellMethod(iris.util._OrderedHashable):
-    """
-    Represents a sub-cell pre-processing operation.
-
-    """
+    """Represents a sub-cell pre-processing operation."""

     # Declare the attribute names relevant to the _OrderedHashable behaviour.
-    _names = ('method', 'coord_names', 'intervals', 'comments')
+    _names = ("method", "coord_names", "intervals", "comments")

     #: The name of the operation that was applied. e.g. "mean", "max", etc.
     method = None
@@ -2230,49 +3016,47 @@ class CellMethod(iris.util._OrderedHashable):
     comments = None

     def __init__(self, method, coords=None, intervals=None, comments=None):
-        """
-        Args:
+        """Initialise a CellMethod.

-        * method:
+        Parameters
+        ----------
+        method :
             The name of the operation.
-
-        Kwargs:
-
-        * coords:
+        coords : :class:`.Coord` instances, optional
             A single instance or sequence of :class:`.Coord` instances or
             coordinate names.
-
-        * intervals:
-            A single string, or a sequence strings, describing the intervals
+        intervals : optional
+            A single string, or a sequence of strings, describing the intervals
             within the cell method.
-
-        * comments:
-            A single string, or a sequence strings, containing any additional
+        comments : optional
+            A single string, or a sequence of strings, containing any additional
             comments.
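+
+        Examples
+        --------
+        An illustrative sketch; the method, coordinate and interval values
+        here are arbitrary::
+
+            cm = CellMethod("mean", coords="time", intervals="1 hour")
+            print(cm)   # time: mean (interval: 1 hour)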
""" - if not isinstance(method, six.string_types): - raise TypeError("'method' must be a string - got a '%s'" % - type(method)) + if not isinstance(method, str): + raise TypeError("'method' must be a string - got a '%s'" % type(method)) - default_name = CFVariableMixin._DEFAULT_NAME + default_name = BaseMetadata.DEFAULT_NAME _coords = [] + if coords is None: pass elif isinstance(coords, Coord): _coords.append(coords.name(token=True)) - elif isinstance(coords, six.string_types): - _coords.append(CFVariableMixin.token(coords) or default_name) + elif isinstance(coords, str): + _coords.append(BaseMetadata.token(coords) or default_name) else: - normalise = (lambda coord: coord.name(token=True) if - isinstance(coord, Coord) else - CFVariableMixin.token(coord) or default_name) + normalise = ( + lambda coord: coord.name(token=True) + if isinstance(coord, Coord) + else BaseMetadata.token(coord) or default_name + ) _coords.extend([normalise(coord) for coord in coords]) _intervals = [] if intervals is None: pass - elif isinstance(intervals, six.string_types): + elif isinstance(intervals, str): _intervals = [intervals] else: _intervals.extend(intervals) @@ -2280,7 +3064,7 @@ def __init__(self, method, coords=None, intervals=None, comments=None): _comments = [] if comments is None: pass - elif isinstance(comments, six.string_types): + elif isinstance(comments, str): _comments = [comments] else: _comments.extend(comments) @@ -2288,86 +3072,58 @@ def __init__(self, method, coords=None, intervals=None, comments=None): self._init(method, tuple(_coords), tuple(_intervals), tuple(_comments)) def __str__(self): - """Return a custom string representation of CellMethod""" + """Return a custom string representation of CellMethod.""" # Group related coord names intervals and comments together - cell_components = zip_longest(self.coord_names, self.intervals, - self.comments, fillvalue="") + coord_string = " ".join([f"{coord}:" for coord in self.coord_names]) + method_string = str(self.method) + interval_string = " ".join( + [f"interval: {interval}" for interval in self.intervals] + ) + comment_string = " ".join([comment for comment in self.comments]) - collection_summaries = [] - cm_summary = "%s: " % self.method + if interval_string and comment_string: + comment_string = "".join( + [f" comment: {comment}" for comment in self.comments] + ) + cm_summary = f"{coord_string} {method_string}" - for coord_name, interval, comment in cell_components: - other_info = ", ".join(filter(None, chain((interval, comment)))) - if other_info: - coord_summary = "%s (%s)" % (coord_name, other_info) - else: - coord_summary = "%s" % coord_name - - collection_summaries.append(coord_summary) + if interval_string or comment_string: + cm_summary += f" ({interval_string}{comment_string})" - return cm_summary + ", ".join(collection_summaries) + return cm_summary def __add__(self, other): # Disable the default tuple behaviour of tuple concatenation - raise NotImplementedError() + return NotImplemented def xml_element(self, doc): - """ - Return a dom element describing itself + """Create the :class:`xml.dom.minidom.Element` that describes this :class:`CellMethod`. + + Parameters + ---------- + doc : + The parent :class:`xml.dom.minidom.Document`. + + Returns + ------- + :class:`xml.dom.minidom.Element` + The :class:`xml.dom.minidom.Element` that describes this + :class:`CellMethod`. 
""" - cellMethod_xml_element = doc.createElement('cellMethod') - cellMethod_xml_element.setAttribute('method', self.method) + cellMethod_xml_element = doc.createElement("cellMethod") + cellMethod_xml_element.setAttribute("method", self.method) - for coord_name, interval, comment in zip_longest(self.coord_names, - self.intervals, - self.comments): - coord_xml_element = doc.createElement('coord') + for coord_name, interval, comment in zip_longest( + self.coord_names, self.intervals, self.comments + ): + coord_xml_element = doc.createElement("coord") if coord_name is not None: - coord_xml_element.setAttribute('name', coord_name) + coord_xml_element.setAttribute("name", coord_name) if interval is not None: - coord_xml_element.setAttribute('interval', interval) + coord_xml_element.setAttribute("interval", interval) if comment is not None: - coord_xml_element.setAttribute('comment', comment) + coord_xml_element.setAttribute("comment", comment) cellMethod_xml_element.appendChild(coord_xml_element) return cellMethod_xml_element - - -# See Coord.cells() for the description/context. -class _CellIterator(Iterator): - def __init__(self, coord): - self._coord = coord - if coord.ndim != 1: - raise iris.exceptions.CoordinateMultiDimError(coord) - self._indices = iter(range(coord.shape[0])) - - def __next__(self): - # NB. When self._indices runs out it will raise StopIteration for us. - i = next(self._indices) - return self._coord.cell(i) - - next = __next__ - - -# See ExplicitCoord._group() for the description/context. -class _GroupIterator(Iterator): - def __init__(self, points): - self._points = points - self._start = 0 - - def __next__(self): - num_points = len(self._points) - if self._start >= num_points: - raise StopIteration - - stop = self._start + 1 - m = self._points[self._start] - while stop < num_points and self._points[stop] == m: - stop += 1 - - group = _GroupbyItem(m, slice(self._start, stop)) - self._start = stop - return group - - next = __next__ diff --git a/lib/iris/cube.py b/lib/iris/cube.py index af405ebe20..40e50da4ff 100644 --- a/lib/iris/cube.py +++ b/lib/iris/cube.py @@ -1,110 +1,68 @@ -# (C) British Crown Copyright 2010 - 2019, Met Office +# Copyright Iris contributors # -# This file is part of Iris. -# -# Iris is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the -# Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Iris is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with Iris. If not, see . - -""" -Classes for representing multi-dimensional data with metadata. - -""" - -from __future__ import (absolute_import, division, print_function) -from six.moves import (filter, input, map, range, zip) # noqa -import six - -from collections import namedtuple, OrderedDict -try: # Python 3 - from collections.abc import (Iterable, - Container, - Mapping, - MutableMapping, - Iterator) -except ImportError: # Python 2.7 - from collections import (Iterable, - Container, - Mapping, - MutableMapping, - Iterator) +# This file is part of Iris and is released under the BSD license. +# See LICENSE in the root of the repository for full licensing details. 
+ +"""Classes for representing multi-dimensional data with metadata.""" + +from __future__ import annotations + +from collections.abc import ( + Container, + Iterable, + Iterator, + Mapping, + MutableMapping, +) import copy from copy import deepcopy -import datetime -from functools import reduce, partial +from functools import partial, reduce +import itertools import operator +from typing import TYPE_CHECKING, Any, Optional, TypeGuard import warnings from xml.dom.minidom import Document import zlib +from cf_units import Unit import dask.array as da import numpy as np import numpy.ma as ma -from iris._cube_coord_common import CFVariableMixin -import iris._concatenate import iris._constraints from iris._data_manager import DataManager import iris._lazy_data as _lazy - import iris._merge import iris.analysis +from iris.analysis import _Weights from iris.analysis.cartography import wrap_lons import iris.analysis.maths import iris.aux_factory +from iris.aux_factory import AuxCoordFactory +from iris.common import CFVariableMixin, CubeMetadata, metadata_manager_factory +from iris.common.metadata import CoordMetadata, metadata_filter +from iris.common.mixin import LimitedAttributeDict import iris.coord_systems import iris.coords +from iris.coords import AncillaryVariable, AuxCoord, CellMeasure, CellMethod, DimCoord + +if TYPE_CHECKING: + import iris.mesh + from iris.mesh import MeshCoord import iris.exceptions import iris.util +import iris.warnings - -__all__ = ['Cube', 'CubeList', 'CubeMetadata'] - - -class CubeMetadata(namedtuple('CubeMetadata', - ['standard_name', - 'long_name', - 'var_name', - 'units', - 'attributes', - 'cell_methods'])): - """ - Represents the phenomenon metadata for a single :class:`Cube`. - - """ - - __slots__ = () - - def name(self, default='unknown'): - """ - Returns a human-readable name. - - First it tries self.standard_name, then it tries the 'long_name' - attribute, then the 'var_name' attribute, before falling back to - the value of `default` (which itself defaults to 'unknown'). - - """ - return self.standard_name or self.long_name or self.var_name or default +__all__ = ["Cube", "CubeAttrsDict", "CubeList"] # The XML namespace to use for CubeML documents XML_NAMESPACE_URI = "urn:x-iris:cubeml-0.2" -class _CubeFilter(object): - """ - A constraint, paired with a list of cubes matching that constraint. +class _CubeFilter: + """A constraint, paired with a list of cubes matching that constraint.""" - """ def __init__(self, constraint, cubes=None): self.constraint = constraint if cubes is None: @@ -115,23 +73,17 @@ def __len__(self): return len(self.cubes) def add(self, cube): - """ - Adds the appropriate (sub)cube to the list of cubes where it - matches the constraint. - - """ + """Add the appropriate (sub)cube to the list of cubes where it matches the constraint.""" sub_cube = self.constraint.extract(cube) if sub_cube is not None: self.cubes.append(sub_cube) def merged(self, unique=False): - """ - Returns a new :class:`_CubeFilter` by merging the list of - cubes. - - Kwargs: + """Return a new :class:`_CubeFilter` by merging the list of cubes. - * unique: + Parameters + ---------- + unique : bool, default=False If True, raises `iris.exceptions.DuplicateDataError` if duplicate cubes are detected. @@ -139,125 +91,143 @@ def merged(self, unique=False): return _CubeFilter(self.constraint, self.cubes.merge(unique)) -class _CubeFilterCollection(object): - """ - A list of _CubeFilter instances. 
+class _CubeFilterCollection: + """A list of _CubeFilter instances.""" - """ @staticmethod def from_cubes(cubes, constraints=None): - """ - Creates a new collection from an iterable of cubes, and some - optional constraints. - - """ + """Create a new collection from an iterable of cubes, and some optional constraints.""" constraints = iris._constraints.list_of_constraints(constraints) pairs = [_CubeFilter(constraint) for constraint in constraints] collection = _CubeFilterCollection(pairs) - for cube in cubes: - collection.add_cube(cube) + for c in cubes: + collection.add_cube(c) return collection def __init__(self, pairs): self.pairs = pairs def add_cube(self, cube): - """ - Adds the given :class:`~iris.cube.Cube` to all of the relevant - constraint pairs. - - """ + """Add the given :class:`~iris.cube.Cube` to all of the relevant constraint pairs.""" for pair in self.pairs: pair.add(cube) def cubes(self): - """ - Returns all the cubes in this collection concatenated into a - single :class:`CubeList`. - - """ + """Return all the cubes in this collection concatenated into a single :class:`CubeList`.""" result = CubeList() for pair in self.pairs: result.extend(pair.cubes) return result def merged(self, unique=False): - """ - Returns a new :class:`_CubeFilterCollection` by merging all the cube - lists of this collection. - - Kwargs: + """Return a new :class:`_CubeFilterCollection` by merging all the cube lists of this collection. - * unique: + Parameters + ---------- + unique : bool, default=False If True, raises `iris.exceptions.DuplicateDataError` if duplicate cubes are detected. """ - return _CubeFilterCollection([pair.merged(unique) for pair in - self.pairs]) + return _CubeFilterCollection([pair.merged(unique) for pair in self.pairs]) class CubeList(list): - """ - All the functionality of a standard :class:`list` with added "Cube" - context. - - """ - - def __new__(cls, list_of_cubes=None): - """Given a :class:`list` of cubes, return a CubeList instance.""" - cube_list = list.__new__(cls, list_of_cubes) + """All the functionality of a standard :class:`list` with added "Cube" context.""" - # Check that all items in the incoming list are cubes. Note that this - # checking does not guarantee that a CubeList instance *always* has - # just cubes in its list as the append & __getitem__ methods have not - # been overridden. - if not all([isinstance(cube, Cube) for cube in cube_list]): - raise ValueError('All items in list_of_cubes must be Cube ' - 'instances.') - return cube_list + def __init__(self, *args, **kwargs): + """Given an iterable of cubes, return a CubeList instance.""" + # Do whatever a list does, to initialise ourself "as a list" + super().__init__(*args, **kwargs) + # Check that all items in the list are cubes. 
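+        # For example, CubeList([Cube([0])]) is valid, whereas CubeList([1])
+        # raises ValueError, because an int is not a Cube.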
+        for c in self:
+            self._assert_is_cube(c)

     def __str__(self):
-        """Runs short :meth:`Cube.summary` on every cube."""
-        result = ['%s: %s' % (i, cube.summary(shorten=True)) for i, cube in
-                  enumerate(self)]
+        """Run short :meth:`Cube.summary` on every cube."""
+        result = [
+            "%s: %s" % (i, cube.summary(shorten=True)) for i, cube in enumerate(self)
+        ]
         if result:
-            result = '\n'.join(result)
+            result = "\n".join(result)
         else:
-            result = '< No cubes >'
+            result = "< No cubes >"
         return result

     def __repr__(self):
-        """Runs repr on every cube."""
-        return '[%s]' % ',\n'.join([repr(cube) for cube in self])
+        """Run repr on every cube."""
+        return "[%s]" % ",\n".join([repr(cube) for cube in self])
+
+    @staticmethod
+    def _assert_is_cube(obj):
+        if not hasattr(obj, "add_aux_coord"):
+            msg = f"Object {obj} cannot be put in a cubelist, as it is not a Cube."
+            raise ValueError(msg)

     def _repr_html_(self):
         from iris.experimental.representation import CubeListRepresentation
+
         representer = CubeListRepresentation(self)
         return representer.repr_html()

     # TODO #370 Which operators need overloads?
+
     def __add__(self, other):
         return CubeList(list.__add__(self, other))

-    def __getitem__(self, keys):
-        """x.__getitem__(y) <==> x[y]"""
-        result = super(CubeList, self).__getitem__(keys)
+    def __getitem__(self, keys):  # numpydoc ignore=SS02
+        """x.__getitem__(y) <==> x[y]."""
+        result = super().__getitem__(keys)
         if isinstance(result, list):
             result = CubeList(result)
         return result

-    def __getslice__(self, start, stop):
-        """
-        x.__getslice__(i, j) <==> x[i:j]
+    def __getslice__(self, start, stop):  # numpydoc ignore=SS02
+        """x.__getslice__(i, j) <==> x[i:j].

         Use of negative indices is not supported.

         """
-        result = super(CubeList, self).__getslice__(start, stop)
+        result = super().__getslice__(start, stop)
         result = CubeList(result)
         return result

+    def __iadd__(self, other_cubes):
+        """Add a sequence of cubes to the cubelist in place."""
+        return super(CubeList, self).__iadd__(CubeList(other_cubes))
+
+    def __setitem__(self, key, cube_or_sequence):
+        """Set self[key] to cube or sequence of cubes."""
+        if isinstance(key, int):
+            # should have single cube.
+            self._assert_is_cube(cube_or_sequence)
+        else:
+            # key is a slice (or exception will come from list method).
+            cube_or_sequence = CubeList(cube_or_sequence)
+
+        super(CubeList, self).__setitem__(key, cube_or_sequence)
+
+    def append(self, cube):
+        """Append a cube."""
+        self._assert_is_cube(cube)
+        super(CubeList, self).append(cube)
+
+    def extend(self, other_cubes):
+        """Extend cubelist by appending the cubes contained in other_cubes.
+
+        Parameters
+        ----------
+        other_cubes :
+            A cubelist or other sequence of cubes.
+ + """ + super(CubeList, self).extend(CubeList(other_cubes)) + + def insert(self, index, cube): + """Insert a cube before index.""" + self._assert_is_cube(cube) + super(CubeList, self).insert(index, cube) + def xml(self, checksum=False, order=True, byteorder=True): """Return a string of the XML that this list of cubes represents.""" doc = Document() @@ -267,105 +237,145 @@ def xml(self, checksum=False, order=True, byteorder=True): for cube_obj in self: cubes_xml_element.appendChild( cube_obj._xml_element( - doc, checksum=checksum, order=order, byteorder=byteorder)) + doc, checksum=checksum, order=order, byteorder=byteorder + ) + ) doc.appendChild(cubes_xml_element) # return our newly created XML string + doc = Cube._sort_xml_attrs(doc) return doc.toprettyxml(indent=" ") - def extract(self, constraints, strict=False): - """ - Filter each of the cubes which can be filtered by the given - constraints. + def extract(self, constraints): + """Filter each of the cubes which can be filtered by the given constraints. This method iterates over each constraint given, and subsets each of the cubes in this CubeList where possible. Thus, a CubeList of length **n** when filtered with **m** constraints can generate a maximum of **m * n** cubes. - Keywords: + Parameters + ---------- + constraints : :class:`~iris.Constraint` or iterable of constraints + A single constraint or an iterable. + + """ + return self._extract_and_merge(self, constraints, strict=False) + + def extract_cube(self, constraint): + """Extract a single cube from a CubeList, and return it. + + Extract a single cube from a CubeList, and return it. + Raise an error if the extract produces no cubes, or more than one. - * strict - boolean - If strict is True, then there must be exactly one cube which is - filtered per constraint. + Parameters + ---------- + constraint : :class:`~iris.Constraint` + The constraint to extract with. + + See Also + -------- + iris.cube.CubeList.extract : + Filter each of the cubes which can be filtered by the given constraints. """ - return self._extract_and_merge(self, constraints, strict, - merge_unique=None) + # Just validate this, so we can accept strings etc, but not multiples. + constraint = iris._constraints.as_constraint(constraint) + return self._extract_and_merge( + self, constraint, strict=True, return_single_cube=True + ) - @staticmethod - def _extract_and_merge(cubes, constraints, strict, merge_unique=False): - # * merge_unique - if None: no merging, if false: non unique merging, - # else unique merging (see merge) + def extract_cubes(self, constraints): + """Extract specific cubes from a CubeList, one for each given constraint. + + Extract specific cubes from a CubeList, one for each given constraint. + Each constraint must produce exactly one cube, otherwise an error is + raised. + + Parameters + ---------- + constraints : iter of, or single, :class:`~iris.Constraint` + The constraints to extract with. + + See Also + -------- + iris.cube.CubeList.extract : + Filter each of the cubes which can be filtered by the given constraints. 
+ + """ + return self._extract_and_merge( + self, constraints, strict=True, return_single_cube=False + ) + @staticmethod + def _extract_and_merge(cubes, constraints, strict=False, return_single_cube=False): constraints = iris._constraints.list_of_constraints(constraints) # group the resultant cubes by constraints in a dictionary - constraint_groups = dict([(constraint, CubeList()) for constraint in - constraints]) - for cube in cubes: - for constraint, cube_list in six.iteritems(constraint_groups): - sub_cube = constraint.extract(cube) + constraint_groups = dict( + [(constraint, CubeList()) for constraint in constraints] + ) + for c in cubes: + for constraint, cube_list in constraint_groups.items(): + sub_cube = constraint.extract(c) if sub_cube is not None: cube_list.append(sub_cube) - if merge_unique is not None: - for constraint, cubelist in six.iteritems(constraint_groups): - constraint_groups[constraint] = cubelist.merge(merge_unique) - result = CubeList() for constraint in constraints: constraint_cubes = constraint_groups[constraint] if strict and len(constraint_cubes) != 1: - msg = 'Got %s cubes for constraint %r, ' \ - 'expecting 1.' % (len(constraint_cubes), constraint) + msg = "Got %s cubes for constraint %r, expecting 1." % ( + len(constraint_cubes), + constraint, + ) raise iris.exceptions.ConstraintMismatchError(msg) result.extend(constraint_cubes) - if strict and len(constraints) == 1: + if return_single_cube: + if len(result) != 1: + # Practically this should never occur, as we now *only* request + # single cube result for 'extract_cube'. + msg = "Got {!s} cubes for constraints {!r}, expecting 1." + raise iris.exceptions.ConstraintMismatchError( + msg.format(len(result), constraints) + ) result = result[0] return result - def extract_strict(self, constraints): - """ - Calls :meth:`CubeList.extract` with the strict keyword set to True. - - """ - return self.extract(constraints, strict=True) - def extract_overlapping(self, coord_names): - """ - Returns a :class:`CubeList` of cubes extracted over regions + """Return a :class:`CubeList` of cubes extracted over regions. + + Return a :class:`CubeList` of cubes extracted over regions where the coordinates overlap, for the coordinates in coord_names. - Args: - - * coord_names: + Parameters + ---------- + coord_names : str or list of str A string or list of strings of the names of the coordinates over which to perform the extraction. """ - if isinstance(coord_names, six.string_types): + if isinstance(coord_names, str): coord_names = [coord_names] def make_overlap_fn(coord_name): def overlap_fn(cell): - return all(cell in cube.coord(coord_name).cells() - for cube in self) + return all(cell in cube.coord(coord_name).cells() for cube in self) + return overlap_fn - coord_values = {coord_name: make_overlap_fn(coord_name) - for coord_name in coord_names} + coord_values = { + coord_name: make_overlap_fn(coord_name) for coord_name in coord_names + } return self.extract(iris.Constraint(coord_values=coord_values)) def merge_cube(self): - """ - Return the merged contents of the :class:`CubeList` as a single - :class:`Cube`. + """Return the merged contents of the :class:`CubeList` as a single :class:`Cube`. If it is not possible to merge the `CubeList` into a single `Cube`, a :class:`~iris.exceptions.MergeError` will be raised @@ -392,24 +402,24 @@ def merge_cube(self): # Register each of our cubes with a single ProtoCube. 
proto_cube = iris._merge.ProtoCube(self[0]) - for cube in self[1:]: - proto_cube.register(cube, error_on_mismatch=True) + for c in self[1:]: + proto_cube.register(c, error_on_mismatch=True) # Extract the merged cube from the ProtoCube. - merged_cube, = proto_cube.merge() + (merged_cube,) = proto_cube.merge() return merged_cube def merge(self, unique=True): - """ - Returns the :class:`CubeList` resulting from merging this - :class:`CubeList`. - - Kwargs: + """Return the :class:`CubeList` resulting from merging this :class:`CubeList`. - * unique: + Parameters + ---------- + unique : bool, default=True If True, raises `iris.exceptions.DuplicateDataError` if duplicate cubes are detected. + Examples + -------- This combines cubes with different values of an auxiliary scalar coordinate, by constructing a new dimension. @@ -423,7 +433,7 @@ def merge(self, unique=True): c2 = c1.copy() c2.coord('y_vals').points = [200] - For example:: + :: >>> print(c1) some_parameter / (unknown) (x_vals: 3) @@ -451,6 +461,14 @@ def merge(self, unique=True): Contrast this with :meth:`iris.cube.CubeList.concatenate`, which joins cubes along an existing dimension. + .. note:: + + Cubes may contain additional dimensional elements such as auxiliary + coordinates, cell measures or ancillary variables. + A group of similar cubes can only merge to a single result if all such + elements are identical in every input cube : they are then present, + unchanged, in the merged output cube. + .. note:: If time coordinates in the list of cubes have differing epochs then @@ -461,18 +479,18 @@ def merge(self, unique=True): """ # Register each of our cubes with its appropriate ProtoCube. proto_cubes_by_name = {} - for cube in self: - name = cube.standard_name + for c in self: + name = c.standard_name proto_cubes = proto_cubes_by_name.setdefault(name, []) proto_cube = None for target_proto_cube in proto_cubes: - if target_proto_cube.register(cube): + if target_proto_cube.register(c): proto_cube = target_proto_cube break if proto_cube is None: - proto_cube = iris._merge.ProtoCube(cube) + proto_cube = iris._merge.ProtoCube(c) proto_cubes.append(proto_cube) # Emulate Python 2 behaviour. @@ -487,66 +505,124 @@ def _none_sort(item): return merged_cubes - def concatenate_cube(self, check_aux_coords=True): - """ - Return the concatenated contents of the :class:`CubeList` as a single - :class:`Cube`. + def concatenate_cube( + self, + check_aux_coords=True, + check_cell_measures=True, + check_ancils=True, + check_derived_coords=True, + ): + """Return the concatenated contents of the :class:`CubeList` as a single :class:`Cube`. If it is not possible to concatenate the `CubeList` into a single `Cube`, a :class:`~iris.exceptions.ConcatenateError` will be raised describing the reason for the failure. - Kwargs: - - * check_aux_coords - Checks the auxilliary coordinates of the cubes match. This check - is not applied to auxilliary coordinates that span the dimension - the concatenation is occuring along. Defaults to True. - + Parameters + ---------- + check_aux_coords : bool, default=True + Checks if the points and bounds of auxiliary coordinates of the + cubes match. This check is not applied to auxiliary coordinates + that span the dimension the concatenation is occurring along. + Defaults to True. + check_cell_measures : bool, default=True + Checks if the data of cell measures of the cubes match. This check + is not applied to cell measures that span the dimension the + concatenation is occurring along. Defaults to True. 
+ check_ancils : bool, default=True + Checks if the data of ancillary variables of the cubes match. This + check is not applied to ancillary variables that span the dimension + the concatenation is occurring along. Defaults to True. + check_derived_coords : bool, default=True + Checks if the points and bounds of derived coordinates of the cubes + match. This check is not applied to derived coordinates that span + the dimension the concatenation is occurring along. Note that + differences in scalar coordinates and dimensional coordinates used + to derive the coordinate are still checked. Checks for auxiliary + coordinates used to derive the coordinates can be ignored with + `check_aux_coords`. Defaults to True. + + Notes + ----- .. note:: Concatenation cannot occur along an anonymous dimension. """ + from iris._concatenate import concatenate + if not self: raise ValueError("can't concatenate an empty CubeList") names = [cube.metadata.name() for cube in self] - unique_names = list(OrderedDict.fromkeys(names)) + unique_names = list(dict.fromkeys(names)) if len(unique_names) == 1: - res = iris._concatenate.concatenate( - self, error_on_mismatch=True, - check_aux_coords=check_aux_coords) + res = concatenate( + self, + error_on_mismatch=True, + check_aux_coords=check_aux_coords, + check_cell_measures=check_cell_measures, + check_ancils=check_ancils, + check_derived_coords=check_derived_coords, + ) n_res_cubes = len(res) if n_res_cubes == 1: return res[0] else: msgs = [] - msgs.append('An unexpected problem prevented concatenation.') - msgs.append('Expected only a single cube, ' - 'found {}.'.format(n_res_cubes)) + msgs.append("An unexpected problem prevented concatenation.") + msgs.append( + "Expected only a single cube, found {}.".format(n_res_cubes) + ) raise iris.exceptions.ConcatenateError(msgs) else: msgs = [] - msgs.append('Cube names differ: {} != {}'.format(names[0], - names[1])) + msgs.append( + "Cube names differ: {} != {}".format(unique_names[0], unique_names[1]) + ) raise iris.exceptions.ConcatenateError(msgs) - def concatenate(self, check_aux_coords=True): - """ - Concatenate the cubes over their common dimensions. - - Kwargs: - - * check_aux_coords - Checks the auxilliary coordinates of the cubes match. This check - is not applied to auxilliary coordinates that span the dimension - the concatenation is occuring along. Defaults to True. - - Returns: + def concatenate( + self, + check_aux_coords=True, + check_cell_measures=True, + check_ancils=True, + check_derived_coords=True, + ): + """Concatenate the cubes over their common dimensions. + + Parameters + ---------- + check_aux_coords : bool, default=True + Checks if the points and bounds of auxiliary coordinates of the + cubes match. This check is not applied to auxiliary coordinates + that span the dimension the concatenation is occurring along. + Defaults to True. + check_cell_measures : bool, default=True + Checks if the data of cell measures of the cubes match. This check + is not applied to cell measures that span the dimension the + concatenation is occurring along. Defaults to True. + check_ancils : bool, default=True + Checks if the data of ancillary variables of the cubes match. This + check is not applied to ancillary variables that span the dimension + the concatenation is occurring along. Defaults to True. + check_derived_coords : bool, default=True + Checks if the points and bounds of derived coordinates of the cubes + match. 
This check is not applied to derived coordinates that span + the dimension the concatenation is occurring along. Note that + differences in scalar coordinates and dimensional coordinates used + to derive the coordinate are still checked. Checks for auxiliary + coordinates used to derive the coordinates can be ignored with + `check_aux_coords`. Defaults to True. + + Returns + ------- + :class:`iris.cube.CubeList` A new :class:`iris.cube.CubeList` of concatenated :class:`iris.cube.Cube` instances. + Notes + ----- This combines cubes with a common dimension coordinate, but occupying different regions of the coordinate value. The cubes are joined across that dimension. @@ -595,6 +671,19 @@ def concatenate(self, check_aux_coords=True): Contrast this with :meth:`iris.cube.CubeList.merge`, which makes a new dimension from values of an auxiliary scalar coordinate. + .. note:: + + Cubes may contain 'extra' dimensional elements such as auxiliary + coordinates, cell measures or ancillary variables. + For a group of similar cubes to concatenate together into one output, all + such elements which do not map to the concatenation axis must be identical + in every input cube : these then appear unchanged in the output. + Similarly, those elements which *do* map to the concatenation axis must + have matching properties, but may have different data values : these then + appear, concatenated, in the output cube. + If any cubes in a group have dimensional elements which do not match + correctly, the group will not concatenate to a single output cube. + .. note:: If time coordinates in the list of cubes have differing epochs then @@ -607,12 +696,18 @@ def concatenate(self, check_aux_coords=True): Concatenation cannot occur along an anonymous dimension. """ - return iris._concatenate.concatenate(self, - check_aux_coords=check_aux_coords) + from iris._concatenate import concatenate + + return concatenate( + self, + check_aux_coords=check_aux_coords, + check_cell_measures=check_cell_measures, + check_ancils=check_ancils, + check_derived_coords=check_derived_coords, + ) def realise_data(self): - """ - Fetch 'real' data for all cubes, in a shared calculation. + """Fetch 'real' data for all cubes, in a shared calculation. This computes any lazy data, equivalent to accessing each `cube.data`. However, lazy calculations and data fetches can be shared between the @@ -630,28 +725,367 @@ def realise_data(self): # Compute these stats together (avoiding multiple data passes). CubeList([a_std, b_std, ab_mean_diff, std_err]).realise_data() - .. Note:: + .. note:: Cubes with non-lazy data are not affected. """ _lazy.co_realise_cubes(*self) + def copy(self): + """Return a CubeList when CubeList.copy() is called.""" + if isinstance(self, CubeList): + return deepcopy(self) -def _is_single_item(testee): - """ - Return whether this is a single item, rather than an iterable. + +def _is_single_item(testee) -> TypeGuard[str | AuxCoord | DimCoord | int]: + """Return whether this is a single item, rather than an iterable. We count string types as 'single', also. """ - return (isinstance(testee, six.string_types) or - not isinstance(testee, Iterable)) + return isinstance(testee, str) or not isinstance(testee, Iterable) + + +class CubeAttrsDict(MutableMapping): + """A :class:`dict`-like object for :attr:`iris.cube.Cube.attributes`. 
+ + A :class:`dict`-like object for :attr:`iris.cube.Cube.attributes`, + providing unified user access to combined cube "local" and "global" attributes + dictionaries, with the access behaviour of an ordinary (single) dictionary. + + Properties :attr:`globals` and :attr:`locals` are regular + :class:`~iris.common.mixin.LimitedAttributeDict`, which can be accessed and + modified separately. The :class:`CubeAttrsDict` itself contains *no* additional + state, but simply provides a 'combined' view of both global + local attributes. + + All the read- and write-type methods, such as ``get()``, ``update()``, ``values()``, + behave according to the logic documented for : :meth:`__getitem__`, + :meth:`__setitem__` and :meth:`__iter__`. + + Notes + ----- + For type testing, ``issubclass(CubeAttrsDict, Mapping)`` is ``True``, but + ``issubclass(CubeAttrsDict, dict)`` is ``False``. + + Examples + -------- + >>> from iris.cube import Cube + >>> cube = Cube([0]) + >>> # CF defines 'history' as global by default. + >>> cube.attributes.update({"history": "from test-123", "mycode": 3}) + >>> print(cube.attributes) + {'history': 'from test-123', 'mycode': 3} + >>> print(repr(cube.attributes)) + CubeAttrsDict(globals={'history': 'from test-123'}, locals={'mycode': 3}) + + >>> cube.attributes['history'] += ' +added' + >>> print(repr(cube.attributes)) + CubeAttrsDict(globals={'history': 'from test-123 +added'}, locals={'mycode': 3}) + + >>> cube.attributes.locals['history'] = 'per-variable' + >>> print(cube.attributes) + {'history': 'per-variable', 'mycode': 3} + >>> print(repr(cube.attributes)) + CubeAttrsDict(globals={'history': 'from test-123 +added'}, locals={'mycode': 3, 'history': 'per-variable'}) + + """ + + # TODO: Create a 'further topic' / 'tech paper' on NetCDF I/O, including + # discussion of attribute handling. + + def __init__( + self, + combined: Optional[Mapping] = None, + locals: Optional[Mapping] = None, + globals: Optional[Mapping] = None, + ): + """Create a cube attributes dictionary. + + We support initialisation from a single generic mapping input, using the default + global/local assignment rules explained at :meth:`__setattr__`, or from + two separate mappings. Two separate dicts can be passed in the ``locals`` + and ``globals`` args, **or** via a ``combined`` arg which has its own + ``.globals`` and ``.locals`` properties -- so this allows passing an existing + :class:`CubeAttrsDict`, which will be copied. + + Parameters + ---------- + combined : dict + Values to init both 'self.globals' and 'self.locals'. If 'combined' itself + has attributes named 'locals' and 'globals', these are used to update the + respective content (after initially setting the individual ones). + Otherwise, 'combined' is treated as a generic mapping, applied as + ``self.update(combined)``, + i.e. it will set locals and/or globals with the same logic as + :meth:`~iris.cube.CubeAttrsDict.__setitem__` . + locals : dict + Initial content for 'self.locals'. + globals : dict + Initial content for 'self.globals'. + + Examples + -------- + >>> from iris.cube import CubeAttrsDict + >>> # CF defines 'history' as global by default. 
+ >>> CubeAttrsDict({'history': 'data-story', 'comment': 'this-cube'}) + CubeAttrsDict(globals={'history': 'data-story'}, locals={'comment': 'this-cube'}) + + >>> CubeAttrsDict(locals={'history': 'local-history'}) + CubeAttrsDict(globals={}, locals={'history': 'local-history'}) + + >>> CubeAttrsDict(globals={'x': 'global'}, locals={'x': 'local'}) + CubeAttrsDict(globals={'x': 'global'}, locals={'x': 'local'}) + + >>> x1 = CubeAttrsDict(globals={'x': 1}, locals={'y': 2}) + >>> x2 = CubeAttrsDict(x1) + >>> x2 + CubeAttrsDict(globals={'x': 1}, locals={'y': 2}) + + """ + # First initialise locals + globals, defaulting to empty. + # See https://github.com/python/mypy/issues/3004 + self.locals = locals # type: ignore[assignment] + self.globals = globals # type: ignore[assignment] + # Update with combined, if present. + if combined is not None: + # Treat a single input with 'locals' and 'globals' properties as an + # existing CubeAttrsDict, and update from its content. + # N.B. enforce deep copying, consistent with general Iris usage. + if hasattr(combined, "globals") and hasattr(combined, "locals"): + # Copy a mapping with globals/locals, like another 'CubeAttrsDict' + self.globals.update(deepcopy(combined.globals)) + self.locals.update(deepcopy(combined.locals)) + else: + # Treat any arbitrary single input value as a mapping (dict), and + # update from it. + self.update(dict(deepcopy(combined))) + + # + # Ensure that the stored local/global dictionaries are "LimitedAttributeDicts". + # + @staticmethod + def _normalise_attrs( + attributes: Optional[Mapping], + ) -> LimitedAttributeDict: + # Convert an input attributes arg into a standard form. + # N.B. content is always a LimitedAttributeDict, and a deep copy of input. + # Allow arg of None, etc. + if not attributes: + attributes = {} + else: + attributes = deepcopy(attributes) + + # Ensure the expected mapping type. + attributes = LimitedAttributeDict(attributes) + return attributes + + @property + def locals(self) -> LimitedAttributeDict: + return self._locals + + @locals.setter + def locals(self, attributes: Optional[Mapping]): + self._locals = self._normalise_attrs(attributes) + + @property + def globals(self) -> LimitedAttributeDict: + return self._globals + + @globals.setter + def globals(self, attributes: Optional[Mapping]): + self._globals = self._normalise_attrs(attributes) + + # + # Provide a serialisation interface + # + def __getstate__(self): + return (self.locals, self.globals) + + def __setstate__(self, state): + self.locals, self.globals = state + + # + # Support comparison -- required because default operation only compares a single + # value at each key. + # + def __eq__(self, other): + # For equality, require both globals + locals to match exactly. + # NOTE: array content works correctly, since 'locals' and 'globals' are always + # iris.common.mixin.LimitedAttributeDict, which gets this right. + other = CubeAttrsDict(other) + result = self.locals == other.locals and self.globals == other.globals + return result + + # + # Provide methods duplicating those for a 'dict', but which are *not* provided by + # MutableMapping, for compatibility with code which expected a cube.attributes to be + # a :class:`~iris.common.mixin.LimitedAttributeDict`. + # The extra required methods are : + # 'copy', 'update', '__ior__', '__or__', '__ror__' and 'fromkeys'. + # + def copy(self): + """Return a copy. + + Implemented with deep copying, consistent with general Iris usage. 
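The `__eq__` above compares the global and local stores separately rather than the combined view, so two instances can present identical combined contents yet compare unequal. A short sketch, assuming the behaviour shown in this hunk:

```python
# Sketch: same combined view, different stores -> unequal CubeAttrsDicts.
from iris.cube import CubeAttrsDict

a = CubeAttrsDict(globals={"history": "x"}, locals={})
b = CubeAttrsDict(globals={}, locals={"history": "x"})

print(dict(a) == dict(b))  # True  -- the combined views match
print(a == b)              # False -- 'history' lives in different stores
```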
+ + """ + return CubeAttrsDict(self) + + def update(self, *args, **kwargs): + """Update by adding items from a mapping arg, or keyword-values. + + If the argument is a split dictionary, preserve the local/global nature of its + keys. + """ + if args and hasattr(args[0], "globals") and hasattr(args[0], "locals"): + dic = args[0] + self.globals.update(dic.globals) + self.locals.update(dic.locals) + else: + super().update(*args) + super().update(**kwargs) + + def __or__(self, arg): + """Implement 'or' via 'update'.""" + if not isinstance(arg, Mapping): + return NotImplemented + new_dict = self.copy() + new_dict.update(arg) + return new_dict + + def __ior__(self, arg): + """Implement 'ior' via 'update'.""" + self.update(arg) + return self + + def __ror__(self, arg): + """Implement 'ror' via 'update'. + + This needs to promote, such that the result is a CubeAttrsDict. + """ + if not isinstance(arg, Mapping): + return NotImplemented + result = CubeAttrsDict(arg) + result.update(self) + return result + + @classmethod + def fromkeys(cls, iterable, value=None): + """Create a new object with keys taken from an argument, all set to one value. + + If the argument is a split dictionary, preserve the local/global nature of its + keys. + """ + if hasattr(iterable, "globals") and hasattr(iterable, "locals"): + # When main input is a split-attrs dict, create global/local parts from its + # global/local keys + result = cls( + globals=dict.fromkeys(iterable.globals, value), + locals=dict.fromkeys(iterable.locals, value), + ) + else: + # Create from a dict.fromkeys, using default classification of the keys. + result = cls(dict.fromkeys(iterable, value)) + return result + + # + # The remaining methods are sufficient to generate a complete standard Mapping + # API. See - + # https://docs.python.org/3/reference/datamodel.html#emulating-container-types. + # + + def __iter__(self): + """Define the combined iteration order. + + Result is: all global keys, then all local ones, but omitting duplicates. + + """ + # NOTE: this means that in the "summary" view, attributes present in both + # locals+globals are listed first, amongst the globals, even though they appear + # with the *value* from locals. + # Otherwise follows order of insertion, as is normal for dicts. + return itertools.chain( + self.globals.keys(), + (x for x in self.locals.keys() if x not in self.globals), + ) + + def __len__(self): + # Return the number of keys in the 'combined' view. + return len(list(iter(self))) + + def __getitem__(self, key): + """Fetch an item from the "combined attributes". + + If the name is present in *both* ``self.locals`` and ``self.globals``, then + the local value is returned. + + """ + if key in self.locals: + store = self.locals + else: + store = self.globals + return store[key] + + def __setitem__(self, key, value): + """Assign an attribute value. + + This may be assigned in either ``self.locals`` or ``self.globals``, chosen as + follows: + + * If there is an existing setting in either ``.locals`` or ``.globals``, then + that is updated (i.e. overwritten). + + * If it is present in *both*, only + ``.locals`` is updated. + + * If there is *no* existing attribute, it is usually created in ``.locals``. + **However** a handful of "known normally global" cases, as defined by CF, + go into ``.globals`` instead. + At present these are : ('conventions', 'featureType', 'history', 'title'). + See `CF Conventions, Appendix A: `_ . 
+ + """ + # If an attribute of this name is already present, update that + # (the local one having priority). + if key in self.locals: + store = self.locals + elif key in self.globals: + store = self.globals + else: + # If NO existing attribute, create local unless it is a "known global" one. + from iris.fileformats.netcdf.saver import _CF_GLOBAL_ATTRS + + if key in _CF_GLOBAL_ATTRS: + store = self.globals + else: + store = self.locals + + store[key] = value + + def __delitem__(self, key): + """Remove an attribute. + + Delete from both local + global. + + """ + if key in self.locals: + del self.locals[key] + if key in self.globals: + del self.globals[key] + + def __str__(self): + # Print it just like a "normal" dictionary. + # Convert to a normal dict to do that. + return str(dict(self)) + + def __repr__(self): + # Special repr form, showing "real" contents. + return f"CubeAttrsDict(globals={self.globals}, locals={self.locals})" class Cube(CFVariableMixin): - """ - A single Iris cube of data and metadata. + """A single Iris cube of data and metadata. Typically obtained from :func:`iris.load`, :func:`iris.load_cube`, :func:`iris.load_cubes`, or from the manipulation of existing cubes. @@ -661,21 +1095,22 @@ class Cube(CFVariableMixin): >>> cube = iris.load_cube(iris.sample_data_path('air_temp.pp')) >>> print(cube) air_temperature / (K) (latitude: 73; longitude: 96) - Dimension coordinates: - latitude x - - longitude - x - Scalar coordinates: - forecast_period: 6477 hours, bound=(-28083.0, 6477.0) hours - forecast_reference_time: 1998-03-01 03:00:00 - pressure: 1000.0 hPa - time: 1998-12-01 00:00:00, \ -bound=(1994-12-01 00:00:00, 1998-12-01 00:00:00) - Attributes: - STASH: m01s16i203 - source: Data from Met Office Unified Model - Cell methods: - mean within years: time - mean over years: time + Dimension coordinates: + latitude x - + longitude - x + Scalar coordinates: + forecast_period \ +6477 hours, bound=(-28083.0, 6477.0) hours + forecast_reference_time 1998-03-01 03:00:00 + pressure 1000.0 hPa + time \ +1998-12-01 00:00:00, bound=(1994-12-01 00:00:00, 1998-12-01 00:00:00) + Cell methods: + 0 time: mean within years + 1 time: mean over years + Attributes: + STASH m01s16i203 + source 'Data from Met Office Unified Model' See the :doc:`user guide` for more information. @@ -688,59 +1123,127 @@ class Cube(CFVariableMixin): #: is similar to Fortran or Matlab, but different than numpy. __orthogonal_indexing__ = True - def __init__(self, data, standard_name=None, long_name=None, - var_name=None, units=None, attributes=None, - cell_methods=None, dim_coords_and_dims=None, - aux_coords_and_dims=None, aux_factories=None, - cell_measures_and_dims=None): + @classmethod + def _sort_xml_attrs(cls, doc): + """Return a copy with all element attributes sorted in alphabetical order. + + Take an xml document and returns a copy with all element + attributes sorted in alphabetical order. + + This is a private utility method required by iris to maintain + legacy xml behaviour beyond python 3.7. + + Parameters + ---------- + doc : :class:`xml.dom.minidom.Document` + + Returns + ------- + :class:`xml.dom.minidom.Document` + The :class:`xml.dom.minidom.Document` with sorted element + attributes. + """ - Creates a cube with data and optional metadata. 
+ from xml.dom.minidom import Document + + def _walk_nodes(node): + """Note: _walk_nodes is called recursively on child elements.""" + # we don't want to copy the children here, so take a shallow copy + new_node = node.cloneNode(deep=False) + + # Versions of python <3.8 order attributes in alphabetical order. + # Python >=3.8 order attributes in insert order. For consistent behaviour + # across both, we'll go with alphabetical order always. + # Remove all the attribute nodes, then add back in alphabetical order. + attrs = [ + new_node.getAttributeNode(attr_name).cloneNode(deep=True) + for attr_name in sorted(node.attributes.keys()) + ] + for attr in attrs: + new_node.removeAttributeNode(attr) + for attr in attrs: + new_node.setAttributeNode(attr) + + if node.childNodes: + children = [_walk_nodes(x) for x in node.childNodes] + for c in children: + new_node.appendChild(c) + + return new_node + + nodes = _walk_nodes(doc.documentElement) + new_doc = Document() + new_doc.appendChild(nodes) + + return new_doc + + def __init__( + self, + data: np.typing.ArrayLike, + standard_name: str | None = None, + long_name: str | None = None, + var_name: str | None = None, + units: Unit | str | None = None, + attributes: Mapping | None = None, + cell_methods: Iterable[CellMethod] | None = None, + dim_coords_and_dims: Iterable[tuple[DimCoord, int]] | None = None, + aux_coords_and_dims: Iterable[tuple[AuxCoord, int | Iterable[int]]] + | None = None, + aux_factories: Iterable[AuxCoordFactory] | None = None, + cell_measures_and_dims: Iterable[tuple[CellMeasure, int]] | None = None, + ancillary_variables_and_dims: Iterable[tuple[AncillaryVariable, int]] + | None = None, + ): + """Create a cube with data and optional metadata. Not typically used - normally cubes are obtained by loading data (e.g. :func:`iris.load`) or from manipulating existing cubes. - Args: - - * data + Parameters + ---------- + data : This object defines the shape of the cube and the phenomenon value in each cell. - ``data`` can be a dask array, a NumPy array, a NumPy array + ``data`` can be a :class:`dask.array.Array`, a + :class:`numpy.ndarray`, a NumPy array subclass (such as :class:`numpy.ma.MaskedArray`), or array_like (as described in :func:`numpy.asarray`). See :attr:`Cube.data`. - - Kwargs: - - * standard_name + standard_name : The standard name for the Cube's data. - * long_name + long_name : An unconstrained description of the cube. - * var_name - The netCDF variable name for the cube. - * units + var_name : + The NetCDF variable name for the cube. + units : The unit of the cube, e.g. ``"m s-1"`` or ``"kelvin"``. - * attributes - A dictionary of cube attributes - * cell_methods + attributes : + A dictionary of cube attributes. + cell_methods : A tuple of CellMethod objects, generally set by Iris, e.g. ``(CellMethod("mean", coords='latitude'), )``. - * dim_coords_and_dims + dim_coords_and_dims : A list of coordinates with scalar dimension mappings, e.g ``[(lat_coord, 0), (lon_coord, 1)]``. - * aux_coords_and_dims + aux_coords_and_dims : A list of coordinates with dimension mappings, e.g ``[(lat_coord, 0), (lon_coord, (0, 1))]``. See also :meth:`Cube.add_dim_coord()` and :meth:`Cube.add_aux_coord()`. - * aux_factories + aux_factories : A list of auxiliary coordinate factories. See :mod:`iris.aux_factory`. - * cell_measures_and_dims + cell_measures_and_dims : A list of CellMeasures with dimension mappings. + ancillary_variables_and_dims : + A list of AncillaryVariables with dimension mappings. 
+ + Examples + -------- + :: - For example:: >>> from iris.coords import DimCoord >>> from iris.cube import Cube >>> latitude = DimCoord(np.linspace(-90, 90, 4), @@ -755,8 +1258,11 @@ def __init__(self, data, standard_name=None, long_name=None, """ # Temporary error while we transition the API. - if isinstance(data, six.string_types): - raise TypeError('Invalid data type: {!r}.'.format(data)) + if isinstance(data, str): + raise TypeError("Invalid data type: {!r}.".format(data)) + + # Configure the metadata manager. + self._metadata_manager = metadata_manager_factory(CubeMetadata) # Initialise the cube data manager. self._data_manager = DataManager(data) @@ -770,22 +1276,31 @@ def __init__(self, data, standard_name=None, long_name=None, #: The "long name" for the Cube's phenomenon. self.long_name = long_name - #: The netCDF variable name for the Cube. + #: The NetCDF variable name for the Cube. self.var_name = var_name - self.cell_methods = cell_methods + # See https://github.com/python/mypy/issues/3004. + self.cell_methods = cell_methods # type: ignore[assignment] - #: A dictionary, with a few restricted keys, for arbitrary - #: Cube metadata. - self.attributes = attributes + #: A dictionary for arbitrary Cube metadata. + #: A few keys are restricted - see :class:`CubeAttrsDict`. + # See https://github.com/python/mypy/issues/3004. + self.attributes = attributes # type: ignore[assignment] # Coords - self._dim_coords_and_dims = [] - self._aux_coords_and_dims = [] - self._aux_factories = [] + self._dim_coords_and_dims: list[tuple[DimCoord, int]] = [] + self._aux_coords_and_dims: list[ + tuple[AuxCoord | DimCoord, tuple[int, ...]] + ] = [] + self._aux_factories: list[AuxCoordFactory] = [] # Cell Measures - self._cell_measures_and_dims = [] + self._cell_measures_and_dims: list[tuple[CellMeasure, tuple[int, ...]]] = [] + + # Ancillary Variables + self._ancillary_variables_and_dims: list[ + tuple[AncillaryVariable, tuple[int, ...]] + ] = [] identities = set() if dim_coords_and_dims: @@ -800,12 +1315,12 @@ def __init__(self, data, standard_name=None, long_name=None, dims.add(dim) if aux_coords_and_dims: - for coord, dims in aux_coords_and_dims: - identity = coord.standard_name, coord.long_name + for auxcoord, auxdims in aux_coords_and_dims: + identity = auxcoord.standard_name, auxcoord.long_name if identity not in identities: - self._add_unique_aux_coord(coord, dims) + self._add_unique_aux_coord(auxcoord, auxdims) else: - self.add_aux_coord(coord, dims) + self.add_aux_coord(auxcoord, auxdims) identities.add(identity) if aux_factories: @@ -813,60 +1328,92 @@ def __init__(self, data, standard_name=None, long_name=None, self.add_aux_factory(factory) if cell_measures_and_dims: - for cell_measure, dims in cell_measures_and_dims: - self.add_cell_measure(cell_measure, dims) + for cell_measure, cmdims in cell_measures_and_dims: + self.add_cell_measure(cell_measure, cmdims) + + if ancillary_variables_and_dims: + for ancillary_variable, avdims in ancillary_variables_and_dims: + self.add_ancillary_variable(ancillary_variable, avdims) @property - def metadata(self): + def _names(self) -> tuple[str | None, str | None, str | None, str | None]: + """Tuple containing the value of each name participating in the identity of a :class:`iris.cube.Cube`. + + A tuple containing the value of each name participating in the identity + of a :class:`iris.cube.Cube`. This includes the standard name, + long name, NetCDF variable name, and the STASH from the attributes + dictionary. 
+ """ - An instance of :class:`CubeMetadata` describing the phenomenon. + return self._metadata_manager._names + + # + # Ensure that .attributes is always a :class:`CubeAttrsDict`. + # + @property # type: ignore[override] + def attributes(self) -> CubeAttrsDict: + return super().attributes # type: ignore[return-value] + + @attributes.setter + def attributes(self, attributes: Mapping | None) -> None: + """Override to CfVariableMixin.attributes.setter. - This property can be updated with any of: - - another :class:`CubeMetadata` instance, - - a tuple/dict which can be used to make a :class:`CubeMetadata`, - - or any object providing the attributes exposed by - :class:`CubeMetadata`. + An override to CfVariableMixin.attributes.setter, which ensures that Cube + attributes are stored in a way which distinguishes global + local ones. """ - return CubeMetadata(self.standard_name, self.long_name, self.var_name, - self.units, self.attributes, self.cell_methods) + self._metadata_manager.attributes = CubeAttrsDict(attributes or {}) - @metadata.setter - def metadata(self, value): - try: - value = CubeMetadata(**value) - except TypeError: - try: - value = CubeMetadata(*value) - except TypeError: - missing_attrs = [field for field in CubeMetadata._fields - if not hasattr(value, field)] - if missing_attrs: - raise TypeError('Invalid/incomplete metadata') - for name in CubeMetadata._fields: - setattr(self, name, getattr(value, name)) + def _dimensional_metadata(self, name_or_dimensional_metadata): + """Return a single _DimensionalMetadata instance that matches. + + Return a single _DimensionalMetadata instance that matches the given + name_or_dimensional_metadata. If one is not found, raise an error. - def is_compatible(self, other, ignore=None): """ - Return whether the cube is compatible with another. + found_item = None + for cube_method in [ + self.coord, + self.cell_measure, + self.ancillary_variable, + ]: + try: + found_item = cube_method(name_or_dimensional_metadata) + if found_item: + break + except KeyError: + pass + if not found_item: + raise KeyError(f"{name_or_dimensional_metadata} was not found in {self}.") + return found_item + + def is_compatible( + self, + other: Cube | CubeMetadata, + ignore: Iterable[str] | str | None = None, + ) -> bool: + """Return whether the cube is compatible with another. Compatibility is determined by comparing :meth:`iris.cube.Cube.name()`, :attr:`iris.cube.Cube.units`, :attr:`iris.cube.Cube.cell_methods` and :attr:`iris.cube.Cube.attributes` that are present in both objects. - Args: - - * other: + Parameters + ---------- + other : An instance of :class:`iris.cube.Cube` or :class:`iris.cube.CubeMetadata`. - * ignore: + ignore : A single attribute key or iterable of attribute keys to ignore when comparing the cubes. Default is None. To ignore all attributes set this to other.attributes. - Returns: - Boolean. + Returns + ------- + bool + Notes + ----- .. seealso:: :meth:`iris.util.describe_diff()` @@ -879,14 +1426,16 @@ def is_compatible(self, other, ignore=None): additional logic that is beyond the scope of this method. 
""" - compatible = (self.name() == other.name() and - self.units == other.units and - self.cell_methods == other.cell_methods) + compatible = ( + self.name() == other.name() + and self.units == other.units + and self.cell_methods == other.cell_methods + ) if compatible: common_keys = set(self.attributes).intersection(other.attributes) if ignore is not None: - if isinstance(ignore, six.string_types): + if isinstance(ignore, str): ignore = (ignore,) common_keys = common_keys.difference(ignore) for key in common_keys: @@ -896,9 +1445,8 @@ def is_compatible(self, other, ignore=None): return compatible - def convert_units(self, unit): - """ - Change the cube's units, converting the values in the data array. + def convert_units(self, unit: str | Unit) -> None: + """Change the cube's units, converting the values in the data array. For example, if a cube's :attr:`~iris.cube.Cube.units` are kelvin then:: @@ -909,223 +1457,373 @@ def convert_units(self, unit): celsius and subtract 273.15 from each value in :attr:`~iris.cube.Cube.data`. - .. warning:: - Calling this method will trigger any deferred loading, causing - the cube's data array to be loaded into memory. + Full list of supported units can be found in the UDUNITS-2 documentation + https://docs.unidata.ucar.edu/udunits/current/#Database + + This operation preserves lazy data. """ # If the cube has units convert the data. if self.units.is_unknown(): raise iris.exceptions.UnitConversionError( - 'Cannot convert from unknown units. ' - 'The "cube.units" attribute may be set directly.') + "Cannot convert from unknown units. " + 'The "cube.units" attribute may be set directly.' + ) if self.has_lazy_data(): # Make fixed copies of old + new units for a delayed conversion. - old_unit = self.units + old_unit = Unit(self.units) new_unit = unit - # Define a delayed conversion operation (i.e. a callback). - def pointwise_convert(values): - return old_unit.convert(values, new_unit) + pointwise_convert = partial(old_unit.convert, other=new_unit) - new_data = _lazy.lazy_elementwise(self.lazy_data(), - pointwise_convert) + new_data = _lazy.lazy_elementwise(self.lazy_data(), pointwise_convert) else: new_data = self.units.convert(self.data, unit) self.data = new_data self.units = unit - def add_cell_method(self, cell_method): + def add_cell_method(self, cell_method: CellMethod) -> None: """Add a :class:`~iris.coords.CellMethod` to the Cube.""" - self.cell_methods += (cell_method, ) - - def add_aux_coord(self, coord, data_dims=None): - """ - Adds a CF auxiliary coordinate to the cube. - - Args: - - * coord + self.cell_methods += (cell_method,) + + def add_aux_coord( + self, + coord: AuxCoord | DimCoord, + data_dims: Iterable[int] | int | None = None, + ) -> None: + """Add a CF auxiliary coordinate to the cube. + + Parameters + ---------- + coord : The :class:`iris.coords.DimCoord` or :class:`iris.coords.AuxCoord` instance to add to the cube. - - Kwargs: - - * data_dims + data_dims : Integer or iterable of integers giving the data dimensions spanned by the coordinate. - Raises a ValueError if a coordinate with identical metadata already - exists on the cube. + Raises + ------ + ValueError + Raises a ValueError if a coordinate with identical metadata already + exists on the cube. - See also :meth:`Cube.remove_coord()`. + See Also + -------- + remove_coord : + Remove a coordinate from the cube. 
""" if self.coords(coord): # TODO: just fail on duplicate object - raise ValueError('Duplicate coordinates are not permitted.') + raise iris.exceptions.CannotAddError( + "Duplicate coordinates are not permitted." + ) self._add_unique_aux_coord(coord, data_dims) - def _check_multi_dim_metadata(self, metadata, data_dims): + def _check_multi_dim_metadata( + self, + metadata: iris.coords._DimensionalMetadata, + data_dims: Iterable[int] | int | None, + ) -> tuple[int, ...]: # Convert to a tuple of integers if data_dims is None: data_dims = tuple() - elif isinstance(data_dims, Container): + elif isinstance(data_dims, Iterable): data_dims = tuple(int(d) for d in data_dims) else: data_dims = (int(data_dims),) if data_dims: if len(data_dims) != metadata.ndim: - msg = 'Invalid data dimensions: {} given, {} expected for ' \ - '{!r}.'.format(len(data_dims), metadata.ndim, - metadata.name()) - raise ValueError(msg) + msg = ( + "Invalid data dimensions: {} given, {} expected for " + "{!r}.".format(len(data_dims), metadata.ndim, metadata.name()) + ) + raise iris.exceptions.CannotAddError(msg) # Check compatibility with the shape of the data for i, dim in enumerate(data_dims): if metadata.shape[i] != self.shape[dim]: - msg = 'Unequal lengths. Cube dimension {} => {};' \ - ' metadata {!r} dimension {} => {}.' - raise ValueError(msg.format(dim, self.shape[dim], - metadata.name(), i, - metadata.shape[i])) + msg = ( + "Unequal lengths. Cube dimension {} => {};" + " metadata {!r} dimension {} => {}." + ) + raise iris.exceptions.CannotAddError( + msg.format( + dim, + self.shape[dim], + metadata.name(), + i, + metadata.shape[i], + ) + ) elif metadata.shape != (1,): - msg = 'Missing data dimensions for multi-valued {} {!r}' + msg = "Missing data dimensions for multi-valued {} {!r}" msg = msg.format(metadata.__class__.__name__, metadata.name()) - raise ValueError(msg) + raise iris.exceptions.CannotAddError(msg) return data_dims - def _add_unique_aux_coord(self, coord, data_dims): + def _add_unique_aux_coord( + self, + coord: AuxCoord | DimCoord, + data_dims: Iterable[int] | int | None, + ) -> None: data_dims = self._check_multi_dim_metadata(coord, data_dims) - self._aux_coords_and_dims.append([coord, data_dims]) - - def add_aux_factory(self, aux_factory): - """ - Adds an auxiliary coordinate factory to the cube. - Args: - - * aux_factory + def is_mesh_coord(anycoord: iris.coords.Coord) -> TypeGuard[MeshCoord]: + return hasattr(anycoord, "mesh") + + if is_mesh_coord(coord): + mesh = self.mesh + if mesh: + msg = ( + "{item} of Meshcoord {coord!r} is " + "{thisval!r}, which does not match existing " + "cube {item} of {ownval!r}." + ) + if coord.mesh != mesh: + raise iris.exceptions.CannotAddError( + msg.format( + item="mesh", + coord=coord, + thisval=coord.mesh, + ownval=mesh, + ) + ) + location = self.location + if coord.location != location: + raise iris.exceptions.CannotAddError( + msg.format( + item="location", + coord=coord, + thisval=coord.location, + ownval=location, + ) + ) + mesh_dims = (self.mesh_dim(),) + if data_dims != mesh_dims: + raise iris.exceptions.CannotAddError( + msg.format( + item="mesh dimension", + coord=coord, + thisval=data_dims, + ownval=mesh_dims, + ) + ) + + self._aux_coords_and_dims.append((coord, data_dims)) + + def add_aux_factory(self, aux_factory: AuxCoordFactory) -> None: + """Add an auxiliary coordinate factory to the cube. + + Parameters + ---------- + aux_factory : The :class:`iris.aux_factory.AuxCoordFactory` instance to add. 
""" if not isinstance(aux_factory, iris.aux_factory.AuxCoordFactory): - raise TypeError('Factory must be a subclass of ' - 'iris.aux_factory.AuxCoordFactory.') - cube_coords = self.coords() + raise TypeError( + "Factory must be a subclass of iris.aux_factory.AuxCoordFactory." + ) + + # Get all 'real' coords (i.e. not derived ones) : use private data + # rather than cube.coords(), as that is quite slow. + def coordsonly(coords_and_dims): + return [coord for coord, dims in coords_and_dims] + + cube_coords = coordsonly(self._dim_coords_and_dims) + coordsonly( + self._aux_coords_and_dims + ) + for dependency in aux_factory.dependencies: ref_coord = aux_factory.dependencies[dependency] if ref_coord is not None and ref_coord not in cube_coords: msg = "{} coordinate for factory is not present on cube {}" - raise ValueError(msg.format(ref_coord.name(), self.name())) + raise iris.exceptions.CannotAddError( + msg.format(ref_coord.name(), self.name()) + ) self._aux_factories.append(aux_factory) - def add_cell_measure(self, cell_measure, data_dims=None): - """ - Adds a CF cell measure to the cube. - - Args: + def add_cell_measure( + self, + cell_measure: CellMeasure, + data_dims: Iterable[int] | int | None = None, + ) -> None: + """Add a CF cell measure to the cube. - * cell_measure + Parameters + ---------- + cell_measure : The :class:`iris.coords.CellMeasure` instance to add to the cube. - - Kwargs: - - * data_dims + data_dims : Integer or iterable of integers giving the data dimensions spanned by the coordinate. - Raises a ValueError if a cell_measure with identical metadata already - exists on the cube. + Raises + ------ + ValueError + Raises a ValueError if a cell_measure with identical metadata already + exists on the cube. - See also - :meth:`Cube.remove_cell_measure()`. + See Also + -------- + remove_cell_measure : + Remove a cell measure from the cube. """ if self.cell_measures(cell_measure): - raise ValueError('Duplicate cell_measures are not permitted.') + raise iris.exceptions.CannotAddError( + "Duplicate cell_measures are not permitted." + ) data_dims = self._check_multi_dim_metadata(cell_measure, data_dims) - self._cell_measures_and_dims.append([cell_measure, data_dims]) - self._cell_measures_and_dims.sort(key=lambda cm_dims: - (cm_dims[0]._as_defn(), cm_dims[1])) + self._cell_measures_and_dims.append((cell_measure, data_dims)) + self._cell_measures_and_dims.sort( + key=lambda cm_dims: (cm_dims[0].metadata, cm_dims[1]) + ) + + def add_ancillary_variable( + self, + ancillary_variable: AncillaryVariable, + data_dims: Iterable[int] | int | None = None, + ) -> None: + """Add a CF ancillary variable to the cube. + + Parameters + ---------- + ancillary_variable : + The :class:`iris.coords.AncillaryVariable` instance to be added to + the cube. + data_dims : + Integer or iterable of integers giving the data dimensions spanned + by the ancillary variable. + + Raises + ------ + ValueError + Raises a ValueError if an ancillary variable with identical metadata + already exists on the cube. - def add_dim_coord(self, dim_coord, data_dim): """ - Add a CF coordinate to the cube. 
+ if self.ancillary_variables(ancillary_variable): + raise iris.exceptions.CannotAddError( + "Duplicate ancillary variables not permitted" + ) + + data_dims = self._check_multi_dim_metadata(ancillary_variable, data_dims) + self._ancillary_variables_and_dims.append((ancillary_variable, data_dims)) + self._ancillary_variables_and_dims.sort( + key=lambda av_dims: (av_dims[0].metadata, av_dims[1]) + ) - Args: + def add_dim_coord(self, dim_coord: DimCoord, data_dim: int | tuple[int]) -> None: + """Add a CF coordinate to the cube. - * dim_coord + Parameters + ---------- + dim_coord : The :class:`iris.coords.DimCoord` instance to add to the cube. - * data_dim + data_dim : Integer giving the data dimension spanned by the coordinate. - Raises a ValueError if a coordinate with identical metadata already - exists on the cube or if a coord already exists for the - given dimension. + Raises + ------ + ValueError + Raises a ValueError if a coordinate with identical metadata already + exists on the cube or if a coord already exists for the + given dimension. - See also :meth:`Cube.remove_coord()`. + See Also + -------- + remove_coord : + Remove a coordinate from the cube. """ if self.coords(dim_coord): - raise ValueError('The coordinate already exists on the cube. ' - 'Duplicate coordinates are not permitted.') + raise iris.exceptions.CannotAddError( + "The coordinate already exists on the cube. " + "Duplicate coordinates are not permitted." + ) # Check dimension is available if self.coords(dimensions=data_dim, dim_coords=True): - raise ValueError('A dim_coord is already associated with ' - 'dimension %d.' % data_dim) + raise iris.exceptions.CannotAddError( + "A dim_coord is already associated with dimension %d." % data_dim + ) self._add_unique_dim_coord(dim_coord, data_dim) - def _add_unique_dim_coord(self, dim_coord, data_dim): + def _add_unique_dim_coord( + self, + dim_coord: DimCoord, + data_dim: int | tuple[int], + ) -> None: if isinstance(dim_coord, iris.coords.AuxCoord): - raise ValueError('The dim_coord may not be an AuxCoord instance.') + raise iris.exceptions.CannotAddError( + "The dim_coord may not be an AuxCoord instance." + ) # Convert data_dim to a single integer if isinstance(data_dim, Container): if len(data_dim) != 1: - raise ValueError('The supplied data dimension must be a' - ' single number.') + raise iris.exceptions.CannotAddError( + "The supplied data dimension must be a single number." + ) data_dim = int(list(data_dim)[0]) else: data_dim = int(data_dim) # Check data_dim value is valid if data_dim < 0 or data_dim >= self.ndim: - raise ValueError('The cube does not have the specified dimension ' - '(%d)' % data_dim) + raise iris.exceptions.CannotAddError( + "The cube does not have the specified dimension (%d)" % data_dim + ) # Check compatibility with the shape of the data if dim_coord.shape[0] != self.shape[data_dim]: - msg = 'Unequal lengths. Cube dimension {} => {}; coord {!r} => {}.' - raise ValueError(msg.format(data_dim, self.shape[data_dim], - dim_coord.name(), - len(dim_coord.points))) + msg = "Unequal lengths. Cube dimension {} => {}; coord {!r} => {}." 
+ raise iris.exceptions.CannotAddError( + msg.format( + data_dim, + self.shape[data_dim], + dim_coord.name(), + len(dim_coord.points), + ) + ) - self._dim_coords_and_dims.append([dim_coord, int(data_dim)]) + self._dim_coords_and_dims.append((dim_coord, int(data_dim))) - def remove_aux_factory(self, aux_factory): - """Removes the given auxiliary coordinate factory from the cube.""" + def remove_aux_factory(self, aux_factory: AuxCoordFactory) -> None: + """Remove the given auxiliary coordinate factory from the cube.""" self._aux_factories.remove(aux_factory) - def _remove_coord(self, coord): - self._dim_coords_and_dims = [(coord_, dim) for coord_, dim in - self._dim_coords_and_dims if coord_ - is not coord] - self._aux_coords_and_dims = [(coord_, dims) for coord_, dims in - self._aux_coords_and_dims if coord_ - is not coord] - - def remove_coord(self, coord): - """ - Removes a coordinate from the cube. - - Args: - - * coord (string or coord) + def _remove_coord(self, coord: DimCoord | AuxCoord) -> None: + self._dim_coords_and_dims = [ + (coord_, dim) + for coord_, dim in self._dim_coords_and_dims + if coord_ is not coord + ] + self._aux_coords_and_dims = [ + (coord_, dims) + for coord_, dims in self._aux_coords_and_dims + if coord_ is not coord + ] + for aux_factory in self.aux_factories: + if coord.metadata == aux_factory.metadata: + self.remove_aux_factory(aux_factory) + + def remove_coord(self, coord: str | DimCoord | AuxCoord | AuxCoordFactory) -> None: + """Remove a coordinate from the cube. + + Parameters + ---------- + coord : The (name of the) coordinate to remove from the cube. - See also :meth:`Cube.add_dim_coord()` - and :meth:`Cube.add_aux_coord()`. + See Also + -------- + add_dim_coord : + Add a CF coordinate to the cube. + add_aux_coord : + Add a CF auxiliary coordinate to the cube. """ coord = self.coord(coord) @@ -1134,45 +1832,64 @@ def remove_coord(self, coord): for factory in self.aux_factories: factory.update(coord) - def remove_cell_measure(self, cell_measure): - """ - Removes a cell measure from the cube. - - Args: + def remove_cell_measure(self, cell_measure: str | CellMeasure) -> None: + """Remove a cell measure from the cube. - * cell_measure (string or cell_measure) + Parameters + ---------- + cell_measure : The (name of the) cell measure to remove from the cube. As either - (a) a :attr:`standard_name`, :attr:`long_name`, or - :attr:`var_name`. Defaults to value of `default` - (which itself defaults to `unknown`) as defined in - :class:`iris._cube_coord_common.CFVariableMixin`. - - (b) a cell_measure instance with metadata equal to that of - the desired cell_measures. - - .. note:: + * (a) a :attr:`standard_name`, :attr:`long_name`, or + :attr:`var_name`. Defaults to value of `default` + (which itself defaults to `unknown`) as defined in + :class:`iris.common.CFVariableMixin`. - If the argument given does not represent a valid cell_measure on - the cube, an :class:`iris.exceptions.CellMeasureNotFoundError` - is raised. + * (b) a cell_measure instance with metadata equal to that of + the desired cell_measures. - .. seealso:: + Notes + ----- + If the argument given does not represent a valid cell_measure on + the cube, an :class:`iris.exceptions.CellMeasureNotFoundError` + is raised. - :meth:`Cube.add_cell_measure()` + See Also + -------- + add_cell_measure : + Add a CF cell measure to the cube. 
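A short sketch of `remove_coord` as documented above: lookup accepts a name or a matching coordinate instance, and any aux factories are updated (or, per `_remove_coord`, removed outright when their metadata matches the removed coordinate).

```python
# Sketch: remove an auxiliary coordinate by name.
import numpy as np

from iris.coords import AuxCoord
from iris.cube import Cube

cube = Cube(np.zeros(2))
cube.add_aux_coord(AuxCoord([10.0, 20.0], long_name="level_height"), 0)
cube.remove_coord("level_height")

print(cube.coords("level_height"))  # []
```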
""" cell_measure = self.cell_measure(cell_measure) - self._cell_measures_and_dims = [[cell_measure_, dim] for cell_measure_, - dim in self._cell_measures_and_dims - if cell_measure_ is not cell_measure] + self._cell_measures_and_dims = [ + (cell_measure_, dim) + for cell_measure_, dim in self._cell_measures_and_dims + if cell_measure_ is not cell_measure + ] - def replace_coord(self, new_coord): - """ - Replace the coordinate whose metadata matches the given coordinate. + def remove_ancillary_variable( + self, + ancillary_variable: str | AncillaryVariable, + ) -> None: + """Remove an ancillary variable from the cube. + + Parameters + ---------- + ancillary_variable : + The (name of the) AncillaryVariable to remove from the cube. """ + ancillary_variable = self.ancillary_variable(ancillary_variable) + + self._ancillary_variables_and_dims = [ + (ancillary_variable_, dim) + for ancillary_variable_, dim in self._ancillary_variables_and_dims + if ancillary_variable_ is not ancillary_variable + ] + + def replace_coord(self, new_coord: DimCoord | AuxCoord) -> None: + """Replace the coordinate whose metadata matches the given coordinate.""" old_coord = self.coord(new_coord) dims = self.coord_dims(old_coord) was_dimensioned = old_coord in self.dim_coords @@ -1185,86 +1902,166 @@ def replace_coord(self, new_coord): for factory in self.aux_factories: factory.update(old_coord, new_coord) - def coord_dims(self, coord): - """ - Returns a tuple of the data dimensions relevant to the given - coordinate. + def coord_dims( + self, coord: str | DimCoord | AuxCoord | AuxCoordFactory + ) -> tuple[int, ...]: + """Return a tuple of the data dimensions relevant to the given coordinate. When searching for the given coordinate in the cube the comparison is made using coordinate metadata equality. Hence the given coordinate instance need not exist on the cube, and may contain different coordinate values. - Args: - - * coord (string or coord) + Parameters + ---------- + coord : The (name of the) coord to look for. + Returns + ------- + tuple: + A tuple of the data dimensions relevant to the given coordinate. """ - - coord = self.coord(coord) - - # Search for existing coordinate (object) on the cube, faster lookup - # than equality - makes no functional difference. - matches = [(dim,) for coord_, dim in self._dim_coords_and_dims if - coord_ is coord] - if not matches: - matches = [dims for coord_, dims in self._aux_coords_and_dims if - coord_ is coord] - - # Search derived aux coords - target_defn = coord._as_defn() - if not matches: - def match(factory): - return factory._as_defn() == target_defn - factories = filter(match, self._aux_factories) - matches = [factory.derived_dims(self.coord_dims) for factory in - factories] - - if not matches: + name_provided = False + if isinstance(coord, str): + # Forced to look-up the coordinate if we only have the name. 
+ coord = self.coord(coord) + name_provided = True + + coord_id = id(coord) + + # Dimension of dimension coordinate by object id + dims_by_id: dict[int, tuple[int, ...]] = { + id(c): (d,) for c, d in self._dim_coords_and_dims + } + # Check for id match - faster than equality check + match = dims_by_id.get(coord_id) + + if match is None: + # Dimension/s of auxiliary coordinate by object id + aux_dims_by_id = {id(c): d for c, d in self._aux_coords_and_dims} + # Check for id match - faster than equality + match = aux_dims_by_id.get(coord_id) + if match is None: + dims_by_id.update(aux_dims_by_id) + + if match is None and not name_provided: + # We may have an equivalent coordinate but not the actual + # cube coordinate instance - so forced to perform coordinate + # lookup to attempt to retrieve it + coord = self.coord(coord) + # Check for id match - faster than equality + match = dims_by_id.get(id(coord)) + + # Search derived aux coordinates + if match is None: + target_metadata = coord.metadata + + def matcher(factory): + return factory.metadata == target_metadata + + factories = filter(matcher, self._aux_factories) + matches = [factory.derived_dims(self.coord_dims) for factory in factories] + if matches: + match = matches[0] + + if match is None: raise iris.exceptions.CoordinateNotFoundError(coord.name()) - return matches[0] + return match - def cell_measure_dims(self, cell_measure): - """ - Returns a tuple of the data dimensions relevant to the given - CellMeasure. + def cell_measure_dims(self, cell_measure: str | CellMeasure) -> tuple[int, ...]: + """Return a tuple of the data dimensions relevant to the given CellMeasure. - * cell_measure - The CellMeasure to look for. + Parameters + ---------- + cell_measure : + The (name of the) cell measure to look for. + Returns + ------- + tuple: + A tuple of the data dimensions relevant to the given cell measure. """ + cell_measure = self.cell_measure(cell_measure) + # Search for existing cell measure (object) on the cube, faster lookup # than equality - makes no functional difference. - matches = [dims for cm_, dims in self._cell_measures_and_dims if - cm_ is cell_measure] + matches = [ + dims for cm_, dims in self._cell_measures_and_dims if cm_ is cell_measure + ] if not matches: raise iris.exceptions.CellMeasureNotFoundError(cell_measure.name()) return matches[0] - def aux_factory(self, name=None, standard_name=None, long_name=None, - var_name=None): + def ancillary_variable_dims( + self, + ancillary_variable: str | AncillaryVariable, + ) -> tuple[int, ...]: + """Return a tuple of the data dimensions relevant to the given AncillaryVariable. + + Parameters + ---------- + ancillary_variable : str or AncillaryVariable + The (name of the) AncillaryVariable to look for. + + Returns + ------- + tuple: + A tuple of the data dimensions relevant to the given ancillary variable. """ - Returns the single coordinate factory that matches the criteria, - or raises an error if not found. + ancillary_variable = self.ancillary_variable(ancillary_variable) + + # Search for existing ancillary variable (object) on the cube, faster + # lookup than equality - makes no functional difference. 
+ matches = [ + dims + for av, dims in self._ancillary_variables_and_dims + if av is ancillary_variable + ] + + if not matches: + raise iris.exceptions.AncillaryVariableNotFoundError( + ancillary_variable.name() + ) - Kwargs: + return matches[0] + + def aux_factory( + self, + name: str | None = None, + standard_name: str | None = None, + long_name: str | None = None, + var_name: str | None = None, + ) -> AuxCoordFactory: + """Return the single coordinate factory that matches the criteria. + + Return the single coordinate factory that matches the criteria, + or raises an error if not found. - * name + Parameters + ---------- + name : If not None, matches against factory.name(). - * standard_name + standard_name : The CF standard name of the desired coordinate factory. If None, does not check for standard name. - * long_name + long_name : An unconstrained description of the coordinate factory. If None, does not check for long_name. - * var_name - The netCDF variable name of the desired coordinate factory. + var_name : + The NetCDF variable name of the desired coordinate factory. If None, does not check for var_name. + Returns + ------- + AuxCoordFactory: + The single coordinate factory that matches the criteria. + + Notes + ----- .. note:: If the arguments given do not result in precisely 1 coordinate @@ -1272,103 +2069,127 @@ def aux_factory(self, name=None, standard_name=None, long_name=None, :class:`iris.exceptions.CoordinateNotFoundError` is raised. """ - factories = self.aux_factories + factories = list(self.aux_factories) if name is not None: - factories = [factory for factory in factories if - factory.name() == name] + factories = [factory for factory in factories if factory.name() == name] if standard_name is not None: - factories = [factory for factory in factories if - factory.standard_name == standard_name] + factories = [ + factory + for factory in factories + if factory.standard_name == standard_name + ] if long_name is not None: - factories = [factory for factory in factories if - factory.long_name == long_name] + factories = [ + factory for factory in factories if factory.long_name == long_name + ] if var_name is not None: - factories = [factory for factory in factories if - factory.var_name == var_name] + factories = [ + factory for factory in factories if factory.var_name == var_name + ] if len(factories) > 1: factory_names = (factory.name() for factory in factories) - msg = 'Expected to find exactly one coordinate factory, but ' \ - 'found {}. They were: {}.'.format(len(factories), - ', '.join(factory_names)) + msg = ( + "Expected to find exactly one coordinate factory, but " + "found {}. They were: {}.".format( + len(factories), ", ".join(factory_names) + ) + ) raise iris.exceptions.CoordinateNotFoundError(msg) elif len(factories) == 0: - msg = 'Expected to find exactly one coordinate factory, but ' \ - 'found none.' + msg = "Expected to find exactly one coordinate factory, but found none." raise iris.exceptions.CoordinateNotFoundError(msg) return factories[0] - def coords(self, name_or_coord=None, standard_name=None, - long_name=None, var_name=None, attributes=None, axis=None, - contains_dimension=None, dimensions=None, coord_system=None, - dim_coords=None): - """ - Return a list of coordinates in this cube fitting the given criteria. - - Kwargs: - - * name_or_coord - Either - - (a) a :attr:`standard_name`, :attr:`long_name`, or - :attr:`var_name`. 
Defaults to value of `default` - (which itself defaults to `unknown`) as defined in - :class:`iris._cube_coord_common.CFVariableMixin`. - - (b) a coordinate instance with metadata equal to that of - the desired coordinates. Accepts either a - :class:`iris.coords.DimCoord`, :class:`iris.coords.AuxCoord`, - :class:`iris.aux_factory.AuxCoordFactory` - or :class:`iris.coords.CoordDefn`. - * standard_name - The CF standard name of the desired coordinate. If None, does not - check for standard name. - * long_name - An unconstrained description of the coordinate. If None, does not - check for long_name. - * var_name - The netCDF variable name of the desired coordinate. If None, does - not check for var_name. - * attributes - A dictionary of attributes desired on the coordinates. If None, - does not check for attributes. - * axis - The desired coordinate axis, see - :func:`iris.util.guess_coord_axis`. If None, does not check for - axis. Accepts the values 'X', 'Y', 'Z' and 'T' (case-insensitive). - * contains_dimension - The desired coordinate contains the data dimension. If None, does + def coords( + self, + name_or_coord: str + | DimCoord + | AuxCoord + | AuxCoordFactory + | CoordMetadata + | None = None, + standard_name: str | None = None, + long_name: str | None = None, + var_name: str | None = None, + attributes: Mapping | None = None, + axis: iris.util.Axis | None = None, + contains_dimension=None, + dimensions: Iterable[int] | int | None = None, + coord_system=None, + dim_coords: bool | None = None, + mesh_coords: bool | None = None, + ) -> list[DimCoord | AuxCoord]: + r"""Return a list of coordinates from the :class:`Cube` that match the provided criteria. + + Parameters + ---------- + name_or_coord : + Either, + + * a :attr:`~iris.common.mixin.CFVariableMixin.standard_name`, + :attr:`~iris.common.mixin.CFVariableMixin.long_name`, or + :attr:`~iris.common.mixin.CFVariableMixin.var_name` which is + compared against the :meth:`~iris.common.mixin.CFVariableMixin.name`. + + * a coordinate or metadata instance equal to that of the desired + coordinate e.g., :class:`~iris.coords.DimCoord` or + :class:`~iris.common.metadata.CoordMetadata`. + standard_name : + The CF standard name of the desired coordinate. If ``None``, does not + check for ``standard name``. + long_name : + An unconstrained description of the coordinate. If ``None``, does not + check for ``long_name``. + var_name : + The NetCDF variable name of the desired coordinate. If ``None``, does + not check for ``var_name``. + attributes : + A dictionary of attributes desired on the coordinates. If ``None``, + does not check for ``attributes``. + axis : + The desired coordinate axis, see :func:`iris.util.guess_coord_axis`. + If ``None``, does not check for ``axis``. Accepts the values ``X``, + ``Y``, ``Z`` and ``T`` (case-insensitive). + contains_dimension : + The desired coordinate contains the data dimension. If ``None``, does not check for the dimension. - * dimensions + dimensions : The exact data dimensions of the desired coordinate. Coordinates - with no data dimension can be found with an empty tuple or list - (i.e. ``()`` or ``[]``). If None, does not check for dimensions. - * coord_system - Whether the desired coordinates have coordinate systems equal to - the given coordinate system. If None, no check is done. - * dim_coords - Set to True to only return coordinates that are the cube's - dimension coordinates. Set to False to only return coordinates - that are the cube's auxiliary and derived coordinates. 
If None, - returns all coordinates. + with no data dimension can be found with an empty ``tuple`` or + ``list`` i.e., ``()`` or ``[]``. If ``None``, does not check for + dimensions. + coord_system : + Whether the desired coordinates have a coordinate system equal to + the given coordinate system. If ``None``, no check is done. + dim_coords : + Set to ``True`` to only return coordinates that are the cube's + dimension coordinates. Set to ``False`` to only return coordinates + that are the cube's auxiliary, mesh and derived coordinates. + If ``None``, returns all coordinates. + mesh_coords : + Set to ``True`` to return only coordinates which are + :class:`~iris.mesh.MeshCoord`\'s. + Set to ``False`` to return only non-mesh coordinates. + If ``None``, returns all coordinates. + + Returns + ------- + A list containing zero or more coordinates matching the provided criteria. + + See Also + -------- + coord : + For matching exactly one coordinate. - See also :meth:`Cube.coord()`. """ - name = None - coord = None - - if isinstance(name_or_coord, six.string_types): - name = name_or_coord - else: - coord = name_or_coord - - coords_and_factories = [] + coords_and_factories: list[DimCoord | AuxCoord | AuxCoordFactory] = [] if dim_coords in [True, None]: coords_and_factories += list(self.dim_coords) @@ -1377,64 +2198,55 @@ def coords(self, name_or_coord=None, standard_name=None, coords_and_factories += list(self.aux_coords) coords_and_factories += list(self.aux_factories) - if name is not None: - coords_and_factories = [coord_ for coord_ in coords_and_factories - if coord_.name() == name] - - if standard_name is not None: - coords_and_factories = [coord_ for coord_ in coords_and_factories - if coord_.standard_name == standard_name] - - if long_name is not None: - coords_and_factories = [coord_ for coord_ in coords_and_factories - if coord_.long_name == long_name] - - if var_name is not None: - coords_and_factories = [coord_ for coord_ in coords_and_factories - if coord_.var_name == var_name] - - if axis is not None: - axis = axis.upper() - guess_axis = iris.util.guess_coord_axis - coords_and_factories = [coord_ for coord_ in coords_and_factories - if guess_axis(coord_) == axis] - - if attributes is not None: - if not isinstance(attributes, Mapping): - msg = 'The attributes keyword was expecting a dictionary ' \ - 'type, but got a %s instead.' % type(attributes) - raise ValueError(msg) - - def attr_filter(coord_): - return all(k in coord_.attributes and coord_.attributes[k] == v - for k, v in six.iteritems(attributes)) - - coords_and_factories = [coord_ for coord_ in coords_and_factories - if attr_filter(coord_)] + if mesh_coords is not None: + # Select on mesh or non-mesh. + mesh_coords = bool(mesh_coords) + # Use duck typing to avoid importing from iris.mesh, + # which could be a circular import. 
+ if mesh_coords: + # *only* MeshCoords + coords_and_factories = [ + item for item in coords_and_factories if hasattr(item, "mesh") + ] + else: + # *not* MeshCoords + coords_and_factories = [ + item for item in coords_and_factories if not hasattr(item, "mesh") + ] + + coords_and_factories = metadata_filter( + coords_and_factories, + item=name_or_coord, + standard_name=standard_name, + long_name=long_name, + var_name=var_name, + attributes=attributes, + axis=axis, + ) if coord_system is not None: - coords_and_factories = [coord_ for coord_ in coords_and_factories - if coord_.coord_system == coord_system] - - if coord is not None: - if isinstance(coord, iris.coords.CoordDefn): - defn = coord - else: - defn = coord._as_defn() - coords_and_factories = [coord_ for coord_ in coords_and_factories - if coord_._as_defn() == defn] + coords_and_factories = [ + coord_ + for coord_ in coords_and_factories + if coord_.coord_system == coord_system + ] if contains_dimension is not None: - coords_and_factories = [coord_ for coord_ in coords_and_factories - if contains_dimension in - self.coord_dims(coord_)] + coords_and_factories = [ + coord_ + for coord_ in coords_and_factories + if contains_dimension in self.coord_dims(coord_) + ] if dimensions is not None: - if not isinstance(dimensions, Container): + if not isinstance(dimensions, Iterable): dimensions = [dimensions] dimensions = tuple(dimensions) - coords_and_factories = [coord_ for coord_ in coords_and_factories - if self.coord_dims(coord_) == dimensions] + coords_and_factories = [ + coord_ + for coord_ in coords_and_factories + if self.coord_dims(coord_) == dimensions + ] # If any factories remain after the above filters we have to make the # coords so they can be returned @@ -1444,70 +2256,159 @@ def extract_coord(coord_or_factory): elif isinstance(coord_or_factory, iris.coords.Coord): coord = coord_or_factory else: - msg = 'Expected Coord or AuxCoordFactory, got ' \ - '{!r}.'.format(type(coord_or_factory)) + msg = "Expected Coord or AuxCoordFactory, got {!r}.".format( + type(coord_or_factory) + ) raise ValueError(msg) return coord - coords = [extract_coord(coord_or_factory) for coord_or_factory in - coords_and_factories] - return coords + coords = [ + extract_coord(coord_or_factory) for coord_or_factory in coords_and_factories + ] - def coord(self, name_or_coord=None, standard_name=None, - long_name=None, var_name=None, attributes=None, axis=None, - contains_dimension=None, dimensions=None, coord_system=None, - dim_coords=None): - """ - Return a single coord given the same arguments as :meth:`Cube.coords`. - - .. note:: - - If the arguments given do not result in precisely 1 coordinate - being matched, an :class:`iris.exceptions.CoordinateNotFoundError` - is raised. - - .. seealso:: + return coords - :meth:`Cube.coords()` for full keyword - documentation. + def coord( + self, + name_or_coord: str + | DimCoord + | AuxCoord + | AuxCoordFactory + | CoordMetadata + | None = None, + standard_name: str | None = None, + long_name: str | None = None, + var_name: str | None = None, + attributes: Mapping | None = None, + axis: iris.util.Axis | None = None, + contains_dimension=None, + dimensions: Iterable[int] | int | None = None, + coord_system=None, + dim_coords: bool | None = None, + mesh_coords: bool | None = None, + ) -> DimCoord | AuxCoord: + r"""Return a single coordinate from the :class:`Cube` that matches the provided criteria. 
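A sketch contrasting `coords` (documented above) with `coord` (whose full parameter list follows): `coords` filters and may legitimately return an empty list, whereas `coord` insists on exactly one match.

```python
# Sketch: coords() returns zero or more matches; coord() demands exactly one.
import numpy as np

from iris.coords import DimCoord
from iris.cube import Cube

lat = DimCoord(np.linspace(-90, 90, 4), standard_name="latitude",
               units="degrees")
cube = Cube(np.zeros(4), dim_coords_and_dims=[(lat, 0)])

print([c.name() for c in cube.coords(axis="Y")])  # ['latitude']
print(cube.coords("longitude"))                   # [] -- no error raised
# cube.coord("longitude") would raise CoordinateNotFoundError instead.
```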
+ + Parameters + ---------- + name_or_coord : + Either, + + * a :attr:`~iris.common.mixin.CFVariableMixin.standard_name`, + :attr:`~iris.common.mixin.CFVariableMixin.long_name`, or + :attr:`~iris.common.mixin.CFVariableMixin.var_name` which is + compared against the :meth:`~iris.common.mixin.CFVariableMixin.name`. + + * a coordinate or metadata instance equal to that of the desired + coordinate e.g., :class:`~iris.coords.DimCoord` or + :class:`~iris.common.metadata.CoordMetadata`. + standard_name : + The CF standard name of the desired coordinate. If ``None``, does not + check for ``standard name``. + long_name : + An unconstrained description of the coordinate. If ``None``, does not + check for ``long_name``. + var_name : + The NetCDF variable name of the desired coordinate. If ``None``, does + not check for ``var_name``. + attributes : + A dictionary of attributes desired on the coordinates. If ``None``, + does not check for ``attributes``. + axis : + The desired coordinate axis, see :func:`iris.util.guess_coord_axis`. + If ``None``, does not check for ``axis``. Accepts the values ``X``, + ``Y``, ``Z`` and ``T`` (case-insensitive). + contains_dimension : + The desired coordinate contains the data dimension. If ``None``, does + not check for the dimension. + dimensions : + The exact data dimensions of the desired coordinate. Coordinates + with no data dimension can be found with an empty ``tuple`` or + ``list`` i.e., ``()`` or ``[]``. If ``None``, does not check for + dimensions. + coord_system : + Whether the desired coordinates have a coordinate system equal to + the given coordinate system. If ``None``, no check is done. + dim_coords : + Set to ``True`` to only return coordinates that are the cube's + dimension coordinates. Set to ``False`` to only return coordinates + that are the cube's auxiliary, mesh and derived coordinates. + If ``None``, returns all coordinates. + mesh_coords : + Set to ``True`` to return only coordinates which are + :class:`~iris.mesh.MeshCoord`\'s. + Set to ``False`` to return only non-mesh coordinates. + If ``None``, returns all coordinates. + + Returns + ------- + The coordinate that matches the provided criteria. + + Notes + ----- + .. note:: + + If the arguments given do not result in **precisely one** coordinate, + then a :class:`~iris.exceptions.CoordinateNotFoundError` is raised. + + See Also + -------- + coords : + For matching zero or more coordinates. """ - coords = self.coords(name_or_coord=name_or_coord, - standard_name=standard_name, - long_name=long_name, var_name=var_name, - attributes=attributes, axis=axis, - contains_dimension=contains_dimension, - dimensions=dimensions, - coord_system=coord_system, - dim_coords=dim_coords) + coords = self.coords( + name_or_coord=name_or_coord, + standard_name=standard_name, + long_name=long_name, + var_name=var_name, + attributes=attributes, + axis=axis, + contains_dimension=contains_dimension, + dimensions=dimensions, + coord_system=coord_system, + dim_coords=dim_coords, + mesh_coords=mesh_coords, + ) if len(coords) > 1: - msg = 'Expected to find exactly 1 coordinate, but found %s. ' \ - 'They were: %s.' % (len(coords), ', '.join(coord.name() for - coord in coords)) - raise iris.exceptions.CoordinateNotFoundError(msg) + emsg = ( + f"Expected to find exactly 1 coordinate, but found {len(coords)}. " + f"They were: {', '.join(coord.name() for coord in coords)}." 
+ ) + raise iris.exceptions.CoordinateNotFoundError(emsg) elif len(coords) == 0: _name = name_or_coord if name_or_coord is not None: - if not isinstance(name_or_coord, six.string_types): + if not isinstance(name_or_coord, str): _name = name_or_coord.name() - bad_name = _name or standard_name or long_name or '' - msg = 'Expected to find exactly 1 %s coordinate, but found ' \ - 'none.' % bad_name - raise iris.exceptions.CoordinateNotFoundError(msg) + emsg = ( + "Expected to find exactly 1 coordinate matching the given " + f"{_name!r} coordinate's metadata, but found none." + ) + raise iris.exceptions.CoordinateNotFoundError(emsg) + + bad_name = _name or standard_name or long_name or "" + emsg = ( + f"Expected to find exactly 1 {bad_name!r} coordinate, " + "but found none." + ) + raise iris.exceptions.CoordinateNotFoundError(emsg) return coords[0] - def coord_system(self, spec=None): - """ - Find the coordinate system of the given type. + def coord_system( + self, + spec: str | type[iris.coord_systems.CoordSystem] | None = None, + ) -> iris.coord_systems.CoordSystem | None: + """Find the coordinate system of the given type. If no target coordinate system is provided then find any available coordinate system. - Kwargs: - - * spec: + Parameters + ---------- + spec : The name or type of a coordinate system subclass. E.g. :: @@ -1520,11 +2421,12 @@ def coord_system(self, spec=None): If spec is None, then find any available coordinate systems within the :class:`iris.cube.Cube`. - Returns: - The :class:`iris.coord_systems.CoordSystem` or None. + Returns + ------- + :class:`iris.coord_systems.CoordSystem` or None. """ - if isinstance(spec, six.string_types) or spec is None: + if isinstance(spec, str) or spec is None: spec_name = spec else: msg = "type %s is not a subclass of CoordSystem" % spec @@ -1539,8 +2441,7 @@ def coord_system(self, spec=None): result = None if spec_name is None: - for key in sorted(coord_systems.keys(), - key=lambda class_: class_.__name__): + for key in sorted(coord_systems.keys(), key=lambda class_: class_.__name__): result = coord_systems[key] break else: @@ -1548,29 +2449,117 @@ def coord_system(self, spec=None): return result - def cell_measures(self, name_or_cell_measure=None): + def _any_meshcoord(self) -> MeshCoord | None: + """Return a MeshCoord if there are any, else None.""" + mesh_coords = self.coords(mesh_coords=True) + if mesh_coords: + result = mesh_coords[0] + else: + result = None + return result # type: ignore[return-value] + + @property + def mesh(self) -> iris.mesh.MeshXY | None: + r"""Return the unstructured :class:`~iris.mesh.MeshXY` associated with the cube. + + Return the unstructured :class:`~iris.mesh.MeshXY` + associated with the cube, if the cube has any + :class:`~iris.mesh.MeshCoord`, + or ``None`` if it has none. + + Returns + ------- + :class:`iris.mesh.MeshXY` or None + The mesh of the cube's + :class:`~iris.mesh.MeshCoords`, + or ``None``. + + """ + coord = self._any_meshcoord() + if coord is None: + result = None + else: + result = coord.mesh + return result + + @property + def location(self) -> iris.mesh.components.Location | None: + r"""Return the mesh "location" of the cube data. + + Return the mesh "location" of the cube data, if the cube has any + :class:`~iris.mesh.MeshCoord`, + or ``None`` if it has none. + + Returns + ------- + str or None + The mesh location of the cube + :class:`~iris.mesh.MeshCoords` + (i.e. one of 'face' / 'edge' / 'node'), or ``None``.
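+
+        For illustration only -- a sketch, with a hypothetical value (it
+        depends entirely on the data; structured cubes give ``None``)::
+
+            >>> cube.location  # doctest: +SKIP
+            'face'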
+ + """ + coord = self._any_meshcoord() + if coord is None: + result = None + else: + result = coord.location + return result + + def mesh_dim(self) -> int | None: + r"""Return the cube dimension of the mesh. + + Return the cube dimension of the mesh, if the cube has any + :class:`~iris.mesh.MeshCoord`, + or ``None`` if it has none. + + Returns + ------- + int or None + The cube dimension which the cube + :class:`~iris.mesh.MeshCoords` map to, + or ``None``. + """ - Return a list of cell measures in this cube fitting the given criteria. + coord = self._any_meshcoord() + if coord is None: + result = None + else: + (result,) = self.coord_dims(coord) # result is a 1-tuple + return result - Kwargs: + def cell_measures( + self, + name_or_cell_measure: str | CellMeasure | None = None, + ) -> list[CellMeasure]: + """Return a list of cell measures in this cube fitting the given criteria. - * name_or_cell_measure + Parameters + ---------- + name_or_cell_measure : Either - (a) a :attr:`standard_name`, :attr:`long_name`, or - :attr:`var_name`. Defaults to value of `default` - (which itself defaults to `unknown`) as defined in - :class:`iris._cube_coord_common.CFVariableMixin`. + * (a) a :attr:`standard_name`, :attr:`long_name`, or + :attr:`var_name`. Defaults to value of `default` + (which itself defaults to `unknown`) as defined in + :class:`iris.common.CFVariableMixin`. - (b) a cell_measure instance with metadata equal to that of - the desired cell_measures. + * (b) a cell_measure instance with metadata equal to that of + the desired cell_measures. - See also :meth:`Cube.cell_measure()`. + Returns + ------- + list + List of cell measures in this cube fitting the given criteria. + + See Also + -------- + cell_measure : + Return a single cell_measure. """ name = None - if isinstance(name_or_cell_measure, six.string_types): + if isinstance(name_or_cell_measure, str): name = name_or_cell_measure else: cell_measure = name_or_cell_measure @@ -1586,58 +2575,204 @@ def cell_measures(self, name_or_cell_measure=None): cell_measures.append(cm) return cell_measures - def cell_measure(self, name_or_cell_measure=None): - """ - Return a single cell_measure given the same arguments as - :meth:`Cube.cell_measures`. + def cell_measure( + self, + name_or_cell_measure: str | CellMeasure | None = None, + ) -> CellMeasure: + """Return a single cell_measure given the same arguments as :meth:`Cube.cell_measures`. + Notes + ----- .. note:: If the arguments given do not result in precisely 1 cell_measure being matched, an :class:`iris.exceptions.CellMeasureNotFoundError` is raised. - .. seealso:: + Returns + ------- + CellMeasure + A single cell measure in this cube fitting the given criteria. - :meth:`Cube.cell_measures()` - for full keyword documentation. + See Also + -------- + cell_measures : + For full keyword documentation. """ cell_measures = self.cell_measures(name_or_cell_measure) if len(cell_measures) > 1: - msg = ('Expected to find exactly 1 cell_measure, but found {}. ' - 'They were: {}.') - msg = msg.format(len(cell_measures), - ', '.join(cm.name() for cm in cell_measures)) + msg = ( + "Expected to find exactly 1 cell_measure, but found {}. " + "They were: {}."
+ ) + msg = msg.format( + len(cell_measures), + ", ".join(cm.name() for cm in cell_measures), + ) raise iris.exceptions.CellMeasureNotFoundError(msg) elif len(cell_measures) == 0: - if isinstance(name_or_cell_measure, six.string_types): + if isinstance(name_or_cell_measure, str): bad_name = name_or_cell_measure else: - bad_name = (name_or_cell_measure and - name_or_cell_measure.name()) or '' - msg = 'Expected to find exactly 1 %s cell_measure, but found ' \ - 'none.' % bad_name + bad_name = (name_or_cell_measure and name_or_cell_measure.name()) or "" + if name_or_cell_measure is not None: + emsg = ( + "Expected to find exactly 1 cell measure matching the given " + f"{bad_name!r} cell measure's metadata, but found none." + ) + raise iris.exceptions.CellMeasureNotFoundError(emsg) + msg = ( + f"Expected to find exactly 1 {bad_name!r} cell measure, " + "but found none." + ) raise iris.exceptions.CellMeasureNotFoundError(msg) return cell_measures[0] - @property - def cell_methods(self): + def ancillary_variables( + self, + name_or_ancillary_variable: str | AncillaryVariable | None = None, + ) -> list[AncillaryVariable]: + """Return a list of ancillary variables in this cube fitting the given criteria. + + Parameters + ---------- + name_or_ancillary_variable : + Either + + * (a) a :attr:`standard_name`, :attr:`long_name`, or + :attr:`var_name`. Defaults to value of `default` + (which itself defaults to `unknown`) as defined in + :class:`iris.common.CFVariableMixin`. + + * (b) an ancillary_variable instance with metadata equal to that of + the desired ancillary_variables. + + Returns + ------- + list + List of ancillary variables in this cube fitting the given criteria. + + See Also + -------- + ancillary_variable : + Return an ancillary_variable. + + """ + name = None + + if isinstance(name_or_ancillary_variable, str): + name = name_or_ancillary_variable + else: + ancillary_variable = name_or_ancillary_variable + ancillary_variables = [] + for av, _ in self._ancillary_variables_and_dims: + if name is not None: + if av.name() == name: + ancillary_variables.append(av) + elif ancillary_variable is not None: + if av == ancillary_variable: + ancillary_variables.append(av) + else: + ancillary_variables.append(av) + return ancillary_variables + + def ancillary_variable( + self, + name_or_ancillary_variable: str | AncillaryVariable | None = None, + ) -> AncillaryVariable: + """Return a single ancillary_variable given the same arguments as :meth:`Cube.ancillary_variables`. + + Notes + ----- + .. note:: + + If the arguments given do not result in precisely 1 + ancillary_variable being matched, an + :class:`iris.exceptions.AncillaryVariableNotFoundError` is raised. + + Returns + ------- + AncillaryVariable + A single ancillary variable in this cube fitting the given criteria. + + See Also + -------- + ancillary_variables : + For full keyword documentation. + """ + ancillary_variables = self.ancillary_variables(name_or_ancillary_variable) + + if len(ancillary_variables) > 1: + msg = ( + "Expected to find exactly 1 ancillary_variable, but found " + "{}. They were: {}."
+ ) + msg = msg.format( + len(ancillary_variables), + ", ".join(anc_var.name() for anc_var in ancillary_variables), + ) + raise iris.exceptions.AncillaryVariableNotFoundError(msg) + elif len(ancillary_variables) == 0: + if isinstance(name_or_ancillary_variable, str): + bad_name = name_or_ancillary_variable + else: + bad_name = ( + name_or_ancillary_variable and name_or_ancillary_variable.name() + ) or "" + if name_or_ancillary_variable is not None: + emsg = ( + "Expected to find exactly 1 ancillary_variable matching the " + f"given {bad_name!r} ancillary_variable's metadata, but found " + "none." + ) + raise iris.exceptions.AncillaryVariableNotFoundError(emsg) + msg = ( + f"Expected to find exactly 1 {bad_name!r} ancillary_variable, " + "but found none." + ) + raise iris.exceptions.AncillaryVariableNotFoundError(msg) + + return ancillary_variables[0] + + @property + def cell_methods(self) -> tuple[CellMethod, ...]: + """Tuple of :class:`iris.coords.CellMethod`. + Tuple of :class:`iris.coords.CellMethod` representing the processing done on the phenomenon. """ - return self._cell_methods + return self._metadata_manager.cell_methods @cell_methods.setter - def cell_methods(self, cell_methods): - self._cell_methods = tuple(cell_methods) if cell_methods else tuple() + def cell_methods( + self, + cell_methods: Iterable[CellMethod] | None, + ) -> None: + if not cell_methods: + # For backwards compatibility: Empty or null value is equivalent to (). + cell_methods = () + else: + # Can supply any iterable, which is converted (copied) to a tuple. + cell_methods = tuple(cell_methods) + for cell_method in cell_methods: + # All contents should be CellMethods. Requiring class membership is + # somewhat non-Pythonic, but simple, and not a problem for now. + if not isinstance(cell_method, iris.coords.CellMethod): + msg = ( # type: ignore[unreachable] + f"Cube.cell_methods assigned value includes {cell_method}, " + "which is not an iris.coords.CellMethod." + ) + raise ValueError(msg) + self._metadata_manager.cell_methods = cell_methods + + def core_data(self) -> np.ndarray | da.Array: + """Retrieve the data array of this :class:`~iris.cube.Cube`. - def core_data(self): - """ Retrieve the data array of this :class:`~iris.cube.Cube` in its current state, which will either be real or lazy. @@ -1650,26 +2785,23 @@ def core_data(self): return self._data_manager.core_data() @property - def shape(self): + def shape(self) -> tuple[int, ...]: """The shape of the data of this cube.""" return self._data_manager.shape @property def dtype(self): - """ - The data type of the values in the data array of this - :class:`~iris.cube.Cube`. - - """ + """The data type of the values in the data array of this :class:`~iris.cube.Cube`.""" return self._data_manager.dtype @property - def ndim(self): + def ndim(self) -> int: """The number of dimensions in the data of this cube.""" return self._data_manager.ndim - def lazy_data(self): - """ + def lazy_data(self) -> da.Array: + """Return a "lazy array" representing the Cube data. + Return a "lazy array" representing the Cube data. A lazy array describes an array whose data values have not been loaded into memory from disk. @@ -1684,21 +2816,22 @@ def lazy_data(self): does _not_ make the Cube data lazy again; the Cube data remains loaded in memory. - Returns: - A lazy array, representing the Cube data. + Returns + ------- + A lazy array, representing the Cube data. 
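+
+        For example, a sketch of the behaviour described above (the filename
+        is hypothetical; any lazily-loaded cube would do)::
+
+            >>> cube = iris.load_cube("data.nc")  # doctest: +SKIP
+            >>> cube.has_lazy_data()              # doctest: +SKIP
+            True
+            >>> data = cube.data  # realises the data  # doctest: +SKIP
+            >>> cube.has_lazy_data()              # doctest: +SKIP
+            False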
""" return self._data_manager.lazy_data() @property - def data(self): - """ - The :class:`numpy.ndarray` representing the multi-dimensional data of - the cube. + def data(self) -> np.ndarray: + """The :class:`numpy.ndarray` representing the multi-dimensional data of the cube. + Notes + ----- .. note:: - Cubes obtained from netCDF, PP, and FieldsFile files will only + Cubes obtained from NetCDF, PP, and FieldsFile files will only populate this attribute on its first use. To obtain the shape of the data without causing it to be loaded, @@ -1726,23 +2859,22 @@ def data(self): return self._data_manager.data @data.setter - def data(self, data): + def data(self, data: np.typing.ArrayLike) -> None: self._data_manager.data = data - def has_lazy_data(self): - """ - Details whether this :class:`~iris.cube.Cube` has lazy data. + def has_lazy_data(self) -> bool: + """Detail whether this :class:`~iris.cube.Cube` has lazy data. - Returns: - Boolean. + Returns + ------- + bool """ return self._data_manager.has_lazy_data() @property - def dim_coords(self): - """ - Return a tuple of all the dimension coordinates, ordered by dimension. + def dim_coords(self) -> tuple[DimCoord, ...]: + """Return a tuple of all the dimension coordinates, ordered by dimension. .. note:: @@ -1754,405 +2886,86 @@ def dim_coords(self): ``dimensions`` and ``dim_coords`` keyword arguments. """ - return tuple((coord for coord, dim in - sorted(self._dim_coords_and_dims, - key=lambda co_di: (co_di[1], co_di[0].name())))) + return tuple( + ( + coord + for coord, dim in sorted( + self._dim_coords_and_dims, + key=lambda co_di: (co_di[1], co_di[0].name()), + ) + ) + ) @property - def aux_coords(self): - """ - Return a tuple of all the auxiliary coordinates, ordered by - dimension(s). - - """ - return tuple((coord for coord, dims in - sorted(self._aux_coords_and_dims, - key=lambda co_di: (co_di[1], co_di[0].name())))) + def aux_coords(self) -> tuple[AuxCoord | DimCoord, ...]: + """Return a tuple of all the auxiliary coordinates, ordered by dimension(s).""" + return tuple( + ( + coord + for coord, dims in sorted( + self._aux_coords_and_dims, + key=lambda co_di: (co_di[1], co_di[0].name()), + ) + ) + ) @property - def derived_coords(self): - """ - Return a tuple of all the coordinates generated by the coordinate - factories. - - """ - return tuple(factory.make_coord(self.coord_dims) for factory in - sorted(self.aux_factories, - key=lambda factory: factory.name())) + def derived_coords(self) -> tuple[AuxCoord, ...]: + """Return a tuple of all the coordinates generated by the coordinate factories.""" + return tuple( + factory.make_coord(self.coord_dims) + for factory in sorted( + self.aux_factories, key=lambda factory: factory.name() + ) + ) @property - def aux_factories(self): + def aux_factories(self) -> tuple[AuxCoordFactory, ...]: """Return a tuple of all the coordinate factories.""" return tuple(self._aux_factories) - def _summary_coord_extra(self, coord, indent): - # Returns the text needed to ensure this coordinate can be - # distinguished from all others with the same name. 
- extra = '' - similar_coords = self.coords(coord.name()) - if len(similar_coords) > 1: - # Find all the attribute keys - keys = set() - for similar_coord in similar_coords: - keys.update(six.iterkeys(similar_coord.attributes)) - # Look for any attributes that vary - vary = set() - attributes = {} - for key in keys: - for similar_coord in similar_coords: - if key not in similar_coord.attributes: - vary.add(key) - break - value = similar_coord.attributes[key] - if attributes.setdefault(key, value) != value: - vary.add(key) - break - keys = sorted(vary & set(coord.attributes.keys())) - bits = ['{}={!r}'.format(key, coord.attributes[key]) for key in - keys] - if bits: - extra = indent + ', '.join(bits) - return extra - - def _summary_extra(self, coords, summary, indent): - # Where necessary, inserts extra lines into the summary to ensure - # coordinates can be distinguished. - new_summary = [] - for coord, summary in zip(coords, summary): - new_summary.append(summary) - extra = self._summary_coord_extra(coord, indent) - if extra: - new_summary.append(extra) - return new_summary - - def summary(self, shorten=False, name_padding=35): - """ - Unicode string summary of the Cube with name, a list of dim coord names - versus length and optionally relevant coordinate information. - - """ - # Create a set to contain the axis names for each data dimension. - dim_names = [set() for dim in range(len(self.shape))] - - # Add the dim_coord names that participate in the associated data - # dimensions. - for dim in range(len(self.shape)): - dim_coords = self.coords(contains_dimension=dim, dim_coords=True) - if dim_coords: - dim_names[dim].add(dim_coords[0].name()) - else: - dim_names[dim].add('-- ') + def summary(self, shorten: bool = False, name_padding: int = 35) -> str: + """Summary of the Cube. - # Convert axes sets to lists and sort. - dim_names = [sorted(names, key=sorted_axes) for names in dim_names] + String summary of the Cube with name+units, a list of dim coord names + versus length and, optionally, a summary of all other components. - # Generate textual summary of the cube dimensionality. - if self.shape == (): - dimension_header = 'scalar cube' - else: - dimension_header = '; '.join( - [', '.join(dim_names[dim]) + - ': %d' % dim_shape for dim, dim_shape in - enumerate(self.shape)]) - - nameunit = '{name} / ({units})'.format(name=self.name(), - units=self.units) - cube_header = '{nameunit!s:{length}} ({dimension})'.format( - length=name_padding, - nameunit=nameunit, - dimension=dimension_header) - summary = '' - - # Generate full cube textual summary. - if not shorten: - indent = 10 - extra_indent = ' ' * 13 - - # Cache the derived coords so we can rely on consistent - # object IDs. - derived_coords = self.derived_coords - # Determine the cube coordinates that are scalar (single-valued) - # AND non-dimensioned. - dim_coords = self.dim_coords - aux_coords = self.aux_coords - all_coords = dim_coords + aux_coords + derived_coords - scalar_coords = [coord for coord in all_coords if not - self.coord_dims(coord) and coord.shape == (1,)] - # Determine the cube coordinates that are not scalar BUT - # dimensioned. 
- scalar_coord_ids = set(map(id, scalar_coords)) - vector_dim_coords = [coord for coord in dim_coords if id(coord) not - in scalar_coord_ids] - vector_aux_coords = [coord for coord in aux_coords if id(coord) not - in scalar_coord_ids] - vector_derived_coords = [coord for coord in derived_coords if - id(coord) not in scalar_coord_ids] - - # cell measures - vector_cell_measures = [cm for cm in self.cell_measures() - if cm.shape != (1,)] - - # Determine the cube coordinates that don't describe the cube and - # are most likely erroneous. - vector_coords = vector_dim_coords + vector_aux_coords + \ - vector_derived_coords - ok_coord_ids = scalar_coord_ids.union(set(map(id, vector_coords))) - invalid_coords = [coord for coord in all_coords if id(coord) not - in ok_coord_ids] - - # Sort scalar coordinates by name. - scalar_coords.sort(key=lambda coord: coord.name()) - # Sort vector coordinates by data dimension and name. - vector_dim_coords.sort( - key=lambda coord: (self.coord_dims(coord), coord.name())) - vector_aux_coords.sort( - key=lambda coord: (self.coord_dims(coord), coord.name())) - vector_derived_coords.sort( - key=lambda coord: (self.coord_dims(coord), coord.name())) - # Sort other coordinates by name. - invalid_coords.sort(key=lambda coord: coord.name()) - - # - # Generate textual summary of cube vector coordinates. - # - def vector_summary(vector_coords, cube_header, max_line_offset, - cell_measures=None): - """ - Generates a list of suitably aligned strings containing coord - names and dimensions indicated by one or more 'x' symbols. - - .. note:: - - The function may need to update the cube header so this is - returned with the list of strings. - - """ - if cell_measures is None: - cell_measures = [] - vector_summary = [] - vectors = [] - - # Identify offsets for each dimension text marker. - alignment = np.array([index for index, value in - enumerate(cube_header) if - value == ':']) - - # Generate basic textual summary for each vector coordinate - # - WITHOUT dimension markers. - for coord in vector_coords + cell_measures: - vector_summary.append('%*s%s' % ( - indent, ' ', iris.util.clip_string(coord.name()))) - min_alignment = min(alignment) - - # Determine whether the cube header requires realignment - # due to one or more longer vector coordinate summaries. - if max_line_offset >= min_alignment: - delta = max_line_offset - min_alignment + 5 - cube_header = '%-*s (%s)' % (int(name_padding + delta), - self.name() or 'unknown', - dimension_header) - alignment += delta - - if vector_coords: - # Generate full textual summary for each vector coordinate - # - WITH dimension markers. - for index, coord in enumerate(vector_coords): - dims = self.coord_dims(coord) - - for dim in range(len(self.shape)): - width = alignment[dim] - len(vector_summary[index]) - char = 'x' if dim in dims else '-' - line = '{pad:{width}}{char}'.format(pad=' ', - width=width, - char=char) - vector_summary[index] += line - vectors = vectors + vector_coords - if cell_measures: - # Generate full textual summary for each vector coordinate - # - WITH dimension markers. - for index, coord in enumerate(cell_measures): - dims = self.cell_measure_dims(coord) - - for dim in range(len(self.shape)): - width = alignment[dim] - len(vector_summary[index]) - char = 'x' if dim in dims else '-' - line = '{pad:{width}}{char}'.format(pad=' ', - width=width, - char=char) - vector_summary[index] += line - vectors = vectors + cell_measures - # Interleave any extra lines that are needed to distinguish - # the coordinates. 
- vector_summary = self._summary_extra(vectors, - vector_summary, - extra_indent) - - return vector_summary, cube_header - - # Calculate the maximum line offset. - max_line_offset = 0 - for coord in all_coords: - max_line_offset = max(max_line_offset, len('%*s%s' % ( - indent, ' ', iris.util.clip_string(str(coord.name()))))) - - if vector_dim_coords: - dim_coord_summary, cube_header = vector_summary( - vector_dim_coords, cube_header, max_line_offset) - summary += '\n Dimension coordinates:\n' + \ - '\n'.join(dim_coord_summary) - - if vector_aux_coords: - aux_coord_summary, cube_header = vector_summary( - vector_aux_coords, cube_header, max_line_offset) - summary += '\n Auxiliary coordinates:\n' + \ - '\n'.join(aux_coord_summary) - - if vector_derived_coords: - derived_coord_summary, cube_header = vector_summary( - vector_derived_coords, cube_header, max_line_offset) - summary += '\n Derived coordinates:\n' + \ - '\n'.join(derived_coord_summary) - - # - # Generate summary of cube cell measures attribute - # - if vector_cell_measures: - cell_measure_summary, cube_header = vector_summary( - [], cube_header, max_line_offset, - cell_measures=vector_cell_measures) - summary += '\n Cell Measures:\n' - summary += '\n'.join(cell_measure_summary) - - # - # Generate textual summary of cube scalar coordinates. - # - scalar_summary = [] - - if scalar_coords: - for coord in scalar_coords: - if (coord.units in ['1', 'no_unit', 'unknown'] or - coord.units.is_time_reference()): - unit = '' - else: - unit = ' {!s}'.format(coord.units) - - # Format cell depending on type of point and whether it - # has a bound. - coord_cell = coord.cell(0) - if isinstance(coord_cell.point, six.string_types): - # Indent string type coordinates - coord_cell_split = [iris.util.clip_string(str(item)) - for item in - coord_cell.point.split('\n')] - line_sep = '\n{pad:{width}}'.format( - pad=' ', width=indent + len(coord.name()) + 2) - coord_cell_str = line_sep.join(coord_cell_split) + unit - else: - coord_cell_cpoint = coord_cell.point - coord_cell_cbound = coord_cell.bound - - coord_cell_str = '{!s}{}'.format(coord_cell_cpoint, - unit) - if coord_cell_cbound is not None: - bound = '({})'.format(', '.join(str(val) for - val in coord_cell_cbound)) - coord_cell_str += ', bound={}{}'.format(bound, - unit) - - scalar_summary.append('{pad:{width}}{name}: {cell}'.format( - pad=' ', width=indent, name=coord.name(), - cell=coord_cell_str)) - - # Interleave any extra lines that are needed to distinguish - # the coordinates. - scalar_summary = self._summary_extra(scalar_coords, - scalar_summary, - extra_indent) - - summary += '\n Scalar coordinates:\n' + '\n'.join( - scalar_summary) - - # - # Generate summary of cube's invalid coordinates. - # - if invalid_coords: - invalid_summary = [] - - for coord in invalid_coords: - invalid_summary.append( - '%*s%s' % (indent, ' ', coord.name())) - - # Interleave any extra lines that are needed to distinguish the - # coordinates. - invalid_summary = self._summary_extra( - invalid_coords, invalid_summary, extra_indent) - - summary += '\n Invalid coordinates:\n' + \ - '\n'.join(invalid_summary) - - # cell measures - scalar_cell_measures = [cm for cm in self.cell_measures() - if cm.shape == (1,)] - if scalar_cell_measures: - summary += '\n Scalar cell measures:\n' - scalar_cms = [' {}'.format(cm.name()) - for cm in scalar_cell_measures] - summary += '\n'.join(scalar_cms) - - # - # Generate summary of cube attributes. 
- # - if self.attributes: - attribute_lines = [] - for name, value in sorted(six.iteritems(self.attributes)): - value = iris.util.clip_string(six.text_type(value)) - line = u'{pad:{width}}{name}: {value}'.format(pad=' ', - width=indent, - name=name, - value=value) - attribute_lines.append(line) - summary += '\n Attributes:\n' + '\n'.join(attribute_lines) - - # - # Generate summary of cube cell methods - # - if self.cell_methods: - summary += '\n Cell methods:\n' - cm_lines = [] - - for cm in self.cell_methods: - cm_lines.append('%*s%s' % (indent, ' ', str(cm))) - summary += '\n'.join(cm_lines) - - # Construct the final cube summary. - summary = cube_header + summary + Parameters + ---------- + shorten : + If set, produce a one-line summary of minimal width, showing only + the cube name, units and dimensions. + When not set (default), produces a full multi-line summary string. + name_padding : + Control the *minimum* width of the cube name + units, + i.e. the indent of the dimension map section. - return summary + """ + from iris._representation.cube_printout import CubePrinter - def __str__(self): - # six has a decorator for this bit, but it doesn't do errors='replace'. - if six.PY3: - return self.summary() - else: - return self.summary().encode(errors='replace') + printer = CubePrinter(self) + summary = printer.to_string(oneline=shorten, name_padding=name_padding) + return summary - def __unicode__(self): + def __str__(self) -> str: return self.summary() - def __repr__(self): - return "<iris 'Cube' of %s>" % self.summary(shorten=True, - name_padding=1) + def __repr__(self) -> str: + return "<iris 'Cube' of %s>" % self.summary(shorten=True, name_padding=1) - def _repr_html_(self): + def _repr_html_(self) -> str: from iris.experimental.representation import CubeRepresentation + representer = CubeRepresentation(self) return representer.repr_html() - def __iter__(self): - raise TypeError('Cube is not iterable') + # Indicate that the iter option is not available. Python will raise + # TypeError with a useful message if a Cube is iterated over. + __iter__ = None + + def __getitem__(self, keys) -> Cube: + """Cube indexing has been implemented at the data level. - def __getitem__(self, keys): - """ Cube indexing (through use of square bracket notation) has been implemented at the data level. That is, the indices provided to this method should be aligned to the data of the cube, and thus the indices @@ -2164,21 +2977,31 @@ def __getitem__(self, keys): full_slice = iris.util._build_full_slice_given_keys(keys, self.ndim) def new_coord_dims(coord_): - return [dimension_mapping[d] - for d in self.coord_dims(coord_) - if dimension_mapping[d] is not None] + return [ + dimension_mapping[d] + for d in self.coord_dims(coord_) + if dimension_mapping[d] is not None + ] def new_cell_measure_dims(cm_): - return [dimension_mapping[d] - for d in self.cell_measure_dims(cm_) - if dimension_mapping[d] is not None] + return [ + dimension_mapping[d] + for d in self.cell_measure_dims(cm_) + if dimension_mapping[d] is not None + ] + + def new_ancillary_variable_dims(av_): + return [ + dimension_mapping[d] + for d in self.ancillary_variable_dims(av_) + if dimension_mapping[d] is not None + ] # Fetch the data as a generic array-like object. cube_data = self._data_manager.core_data() # Index with the keys, using orthogonal slicing. - dimension_mapping, data = iris.util._slice_data_with_keys( - cube_data, keys) + dimension_mapping, data = iris.util._slice_data_with_keys(cube_data, keys) # We don't want a view of the data, so take a copy of it.
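+        # (Basic slicing of numpy arrays returns views onto the parent
+        # array; the deepcopy below keeps the new cube's data, and any
+        # mask, fully independent of this cube.)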
data = deepcopy(data) @@ -2187,12 +3010,11 @@ def new_cell_measure_dims(cm_): # results in numpy (v1.11.1) *always* returning a MaskedConstant # with a dtype of float64, regardless of the original masked # array dtype! - if isinstance(data, ma.core.MaskedConstant) and \ - data.dtype != cube_data.dtype: + if isinstance(data, ma.core.MaskedConstant) and data.dtype != cube_data.dtype: data = ma.array(data.data, mask=data.mask, dtype=cube_data.dtype) # Make the new cube slice - cube = Cube(data) + cube = self.__class__(data) cube.metadata = deepcopy(self.metadata) # Record a mapping from old coordinate IDs to new coordinates, @@ -2201,8 +3023,7 @@ def new_cell_measure_dims(cm_): # Slice the coords for coord in self.aux_coords: - coord_keys = tuple([full_slice[dim] for dim in - self.coord_dims(coord)]) + coord_keys = tuple([full_slice[dim] for dim in self.coord_dims(coord)]) try: new_coord = coord[coord_keys] except ValueError: @@ -2213,8 +3034,7 @@ def new_cell_measure_dims(cm_): coord_mapping[id(coord)] = new_coord for coord in self.dim_coords: - coord_keys = tuple([full_slice[dim] for dim in - self.coord_dims(coord)]) + coord_keys = tuple([full_slice[dim] for dim in self.coord_dims(coord)]) new_dims = new_coord_dims(coord) # Try/Catch to handle slicing that makes the points/bounds # non-monotonic @@ -2241,28 +3061,33 @@ def new_cell_measure_dims(cm_): dims = self.cell_measure_dims(cellmeasure) cm_keys = tuple([full_slice[dim] for dim in dims]) new_cm = cellmeasure[cm_keys] - cube.add_cell_measure(new_cm, - new_cell_measure_dims(cellmeasure)) + cube.add_cell_measure(new_cm, new_cell_measure_dims(cellmeasure)) + + # slice the ancillary variables and add them to the cube + for ancvar in self.ancillary_variables(): + dims = self.ancillary_variable_dims(ancvar) + av_keys = tuple([full_slice[dim] for dim in dims]) + new_av = ancvar[av_keys] + cube.add_ancillary_variable(new_av, new_ancillary_variable_dims(ancvar)) return cube - def subset(self, coord): - """ + def subset(self, coord: AuxCoord | DimCoord) -> Cube | None: + """Get a subset of the cube by providing the desired resultant coordinate. + Get a subset of the cube by providing the desired resultant coordinate. If the coordinate provided applies to the whole cube, the whole cube is returned. As such, the operation is not strict. """ if not isinstance(coord, iris.coords.Coord): - raise ValueError('coord_to_extract must be a valid Coord.') + raise ValueError("coord_to_extract must be a valid Coord.") # Get the coord to extract from the cube coord_to_extract = self.coord(coord) # If scalar, return the whole cube. Not possible to subset 1 point. - if coord_to_extract in self.aux_coords and\ - len(coord_to_extract.points) == 1: - + if coord_to_extract in self.aux_coords and len(coord_to_extract.points) == 1: # Default to returning None result = None @@ -2283,72 +3108,75 @@ def subset(self, coord): # Identify the indices which intersect the requested coord and # coord_to_extract - coord_indices = coord_to_extract.intersect(coord, - return_indices=True) + coord_indices = coord_to_extract.intersect(coord, return_indices=True) + + if coord_indices.size == 0: + # No matches found.
+ return None # Build up a slice which spans the whole of the cube full_slice = [slice(None, None)] * len(self.shape) # Update the full slice to only extract specific indices which # were identified above full_slice[coord_to_extract_dim] = coord_indices - full_slice = tuple(full_slice) - result = self[full_slice] + result = self[tuple(full_slice)] return result - def extract(self, constraint): - """ - Filter the cube by the given constraint using - :meth:`iris.Constraint.extract` method. - - """ + def extract(self, constraint: iris.Constraint | str | None) -> Cube: + """Filter cube by the given constraint using :meth:`iris.Constraint.extract`.""" # Cast the constraint into a proper constraint if it is not so already constraint = iris._constraints.as_constraint(constraint) return constraint.extract(self) - def intersection(self, *args, **kwargs): - """ - Return the intersection of the cube with specified coordinate - ranges. + def intersection(self, *args, **kwargs) -> Cube: + """Return the intersection of the cube with specified coordinate ranges. Coordinate ranges can be specified as: - (a) instances of :class:`iris.coords.CoordExtent`. - - (b) keyword arguments, where the keyword name specifies the name - of the coordinate (as defined in :meth:`iris.cube.Cube.coords()`) - and the value defines the corresponding range of coordinate - values as a tuple. The tuple must contain two, three, or four - items corresponding to: (minimum, maximum, min_inclusive, - max_inclusive). Where the items are defined as: - - * minimum - The minimum value of the range to select. - - * maximum - The maximum value of the range to select. - - * min_inclusive - If True, coordinate values equal to `minimum` will be included - in the selection. Default is True. - - * max_inclusive - If True, coordinate values equal to `maximum` will be included - in the selection. Default is True. - - To perform an intersection that ignores any bounds on the coordinates, - set the optional keyword argument *ignore_bounds* to True. Defaults to - False. - + * (a) positional arguments: instances of :class:`iris.coords.CoordExtent`, + or equivalent tuples of 3-5 items: + + * (b) keyword arguments, where the keyword name specifies the name + of the coordinate, and the value defines the corresponding range of + coordinate values as a tuple. The tuple must contain two, three, or + four items, corresponding to `(minimum, maximum, min_inclusive, + max_inclusive)` as defined below. + + Parameters + ---------- + coord : + Either a :class:`iris.coords.Coord`, or coordinate name + (as defined in :meth:`iris.cube.Cube.coords()`). + minimum : + The minimum value of the range to select. + maximum : + The maximum value of the range to select. + min_inclusive : + If True, coordinate values equal to `minimum` will be included + in the selection. Default is True. + max_inclusive : + If True, coordinate values equal to `maximum` will be included + in the selection. Default is True. + ignore_bounds : optional + Intersect based on points only. Default False. + threshold : optional + Minimum proportion of a bounded cell that must overlap with the + specified range. Default 0. + + Notes + ----- .. note:: For ranges defined over "circular" coordinates (i.e. those where the `units` attribute has a modulus defined) the cube - will be "rolled" to fit where neccesary. - - .. warning:: + will be "rolled" to fit where necessary. When requesting a + range that covers the entire modulus, a split cell will + preferentially be placed at the ``minimum`` end.
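+
+        For illustration, a sketch of the ``threshold`` keyword (the cube and
+        range here are hypothetical)::
+
+            # keep only cells with at least half their extent in range
+            subset = cube.intersection(longitude=(30, 50), threshold=0.5)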
- Currently this routine only works with "circular" - coordinates (as defined in the previous note.) + Warnings + -------- + Currently this routine only works with "circular" + coordinates (as defined in the previous note.) For example:: @@ -2357,7 +3185,7 @@ def intersection(self, *args, **kwargs): >>> print(cube.coord('longitude').points[::10]) [ 0. 37.49999237 74.99998474 112.49996948 \ 149.99996948 - 187.49995422 224.99993896 262.49993896 299.99993896 \ + 187.49995422 224.99993896 262.49993896 299.99993896 \ 337.49990845] >>> subset = cube.intersection(longitude=(30, 50)) >>> print(subset.coord('longitude').points) @@ -2366,39 +3194,60 @@ def intersection(self, *args, **kwargs): >>> print(subset.coord('longitude').points) [-7.50012207 -3.75012207 0. 3.75 7.5 ] - Returns: + Returns + ------- + :class:`~iris.cube.Cube` A new :class:`~iris.cube.Cube` giving the subset of the cube which intersects with the requested coordinate intervals. """ result = self - ignore_bounds = kwargs.pop('ignore_bounds', False) + ignore_bounds = kwargs.pop("ignore_bounds", False) + threshold = kwargs.pop("threshold", 0) for arg in args: - result = result._intersect(*arg, ignore_bounds=ignore_bounds) - for name, value in six.iteritems(kwargs): - result = result._intersect(name, *value, - ignore_bounds=ignore_bounds) + result = result._intersect( + *arg, ignore_bounds=ignore_bounds, threshold=threshold + ) # type: ignore[misc] + for name, value in kwargs.items(): + result = result._intersect( + name, *value, ignore_bounds=ignore_bounds, threshold=threshold + ) # type: ignore[misc] return result - def _intersect(self, name_or_coord, minimum, maximum, - min_inclusive=True, max_inclusive=True, - ignore_bounds=False): + def _intersect( + self, + name_or_coord: str + | DimCoord + | AuxCoord + | AuxCoordFactory + | CoordMetadata + | None, + minimum: float | int, + maximum: float | int, + min_inclusive: bool = True, + max_inclusive: bool = True, + ignore_bounds: bool = False, + threshold=0, + ) -> Cube: coord = self.coord(name_or_coord) if coord.ndim != 1: raise iris.exceptions.CoordinateMultiDimError(coord) if coord.nbounds not in (0, 2): - raise ValueError('expected 0 or 2 bound values per cell') + raise ValueError("expected 0 or 2 bound values per cell") if minimum > maximum: - raise ValueError('minimum greater than maximum') + raise ValueError("minimum greater than maximum") modulus = coord.units.modulus if modulus is None: - raise ValueError('coordinate units with no modulus are not yet' - ' supported') - subsets, points, bounds = self._intersect_modulus(coord, - minimum, maximum, - min_inclusive, - max_inclusive, - ignore_bounds) + raise ValueError("coordinate units with no modulus are not yet supported") + subsets, points, bounds = self._intersect_modulus( + coord, + minimum, + maximum, + min_inclusive, + max_inclusive, + ignore_bounds, + threshold, + ) # By this point we have either one or two subsets along the relevant # dimension. 
If it's just one subset (which might be a slice or an @@ -2417,19 +3266,14 @@ def make_chunk(key): chunk_coord.bounds = bounds[(key,)] return chunk - dim, = self.coord_dims(coord) + (dim,) = self.coord_dims(coord) key_tuple_prefix = (slice(None),) * dim chunks = [make_chunk(key) for key in subsets] if len(chunks) == 1: result = chunks[0] else: chunk_data = [chunk.core_data() for chunk in chunks] - if self.has_lazy_data(): - func = da.concatenate - else: - module = ma if ma.isMaskedArray(self.data) else np - func = module.concatenate - data = func(chunk_data, dim) + data = _lazy.concatenate(chunk_data, axis=dim) result = iris.cube.Cube(data) result.metadata = deepcopy(self.metadata) @@ -2441,26 +3285,29 @@ def create_coords(src_coords, add_coord): # Add copies of the source coordinates, selecting # the appropriate subsets out of coordinates which # share the intersection dimension. - preserve_circular = (min_inclusive and max_inclusive and - abs(maximum - minimum) == modulus) + preserve_circular = ( + min_inclusive + and max_inclusive + and abs(maximum - minimum) == modulus + ) for src_coord in src_coords: dims = self.coord_dims(src_coord) if dim in dims: dim_within_coord = dims.index(dim) - points = np.concatenate([chunk.coord(src_coord).points - for chunk in chunks], - dim_within_coord) + points = np.concatenate( + [chunk.coord(src_coord).points for chunk in chunks], + dim_within_coord, + ) if src_coord.has_bounds(): bounds = np.concatenate( - [chunk.coord(src_coord).bounds - for chunk in chunks], - dim_within_coord) + [chunk.coord(src_coord).bounds for chunk in chunks], + dim_within_coord, + ) else: bounds = None - result_coord = src_coord.copy(points=points, - bounds=bounds) + result_coord = src_coord.copy(points=points, bounds=bounds) - circular = getattr(result_coord, 'circular', False) + circular = getattr(result_coord, "circular", False) if circular and not preserve_circular: result_coord.circular = False else: @@ -2468,13 +3315,44 @@ def create_coords(src_coords, add_coord): add_coord(result_coord, dims) coord_mapping[id(src_coord)] = result_coord + def create_metadata(src_metadatas, add_metadata, get_metadata): + for src_metadata in src_metadatas: + dims = src_metadata.cube_dims(self) + if dim in dims: + dim_within_coord = dims.index(dim) + data = np.concatenate( + [ + get_metadata(chunk, src_metadata.name()).core_data() + for chunk in chunks + ], + dim_within_coord, + ) + result_coord = src_metadata.copy(values=data) + else: + result_coord = src_metadata.copy() + add_metadata(result_coord, dims) + create_coords(self.dim_coords, result.add_dim_coord) create_coords(self.aux_coords, result.add_aux_coord) + create_metadata( + self.cell_measures(), result.add_cell_measure, Cube.cell_measure + ) + create_metadata( + self.ancillary_variables(), + result.add_ancillary_variable, + Cube.ancillary_variable, + ) for factory in self.aux_factories: result.add_aux_factory(factory.updated(coord_mapping)) return result - def _intersect_derive_subset(self, coord, points, bounds, inside_indices): + def _intersect_derive_subset( + self, + coord: AuxCoord | DimCoord, + points: np.ndarray, + bounds: np.ndarray, + inside_indices: np.ndarray, + ) -> list[slice]: # Return the subsets, i.e. the means to allow the slicing of # coordinates to ensure that they remain contiguous. 
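+        # (For example, a wrapped selection whose indices run [n-2, n-1, 0, 1]
+        # is returned as two slices, so that each piece remains contiguous.)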
modulus = coord.units.modulus @@ -2483,8 +3361,7 @@ def _intersect_derive_subset(self, coord, points, bounds, inside_indices): non_zero_step_indices = np.nonzero(step)[0] def dim_coord_subset(): - """ - Derive the subset for dimension coordinates. + """Derive the subset for dimension coordinates. Ensure that we do not wrap if blocks are at the very edge. That is, if the very edge is wrapped and corresponds to base + period, @@ -2501,9 +3378,9 @@ def dim_coord_subset(): # Condition1: The two blocks don't themselves wrap # (inside_indices is contiguous). # Condition2: Are we chunked at either extreme edge. - edge_wrap = ((index_of_second_chunk == - inside_indices[end_of_first_chunk] + 1) and - index_of_second_chunk in (final_index, 1)) + edge_wrap = ( + index_of_second_chunk == inside_indices[end_of_first_chunk] + 1 + ) and index_of_second_chunk in (final_index, 1) subsets = None if edge_wrap: # Increasing coord @@ -2517,29 +3394,28 @@ def dim_coord_subset(): # Unwrap points and bounds (if present and equal base + period) if bounds is not None: - edge_equal_base_period = ( - np.isclose(coord.bounds[index_end, index_end], - coord.bounds[index_start, index_start] + - modulus)) + edge_equal_base_period = np.isclose( + coord.bounds[index_end, index_end], + coord.bounds[index_start, index_start] + modulus, + ) if edge_equal_base_period: bounds[index_end, :] = coord.bounds[index_end, :] else: - edge_equal_base_period = ( - np.isclose(coord.points[index_end], - coord.points[index_start] + - modulus)) + edge_equal_base_period = np.isclose( + coord.points[index_end], + coord.points[index_start] + modulus, + ) if edge_equal_base_period: points[index_end] = coord.points[index_end] - subsets = [slice(inside_indices[0], - inside_indices[-1] + 1)] + subsets = [slice(inside_indices[0], inside_indices[-1] + 1)] # Either no edge wrap or edge wrap != base + period # i.e. 
derive subset without alteration if subsets is None: subsets = [ slice(index_of_second_chunk, None), - slice(None, inside_indices[end_of_first_chunk] + 1) - ] + slice(None, inside_indices[end_of_first_chunk] + 1), + ] return subsets @@ -2558,150 +3434,212 @@ def dim_coord_subset(): subsets = [inside_indices] return subsets - def _intersect_modulus(self, coord, minimum, maximum, min_inclusive, - max_inclusive, ignore_bounds): + def _intersect_modulus( + self, + coord: AuxCoord | DimCoord, + minimum: float | int, + maximum: float | int, + min_inclusive: bool, + max_inclusive: bool, + ignore_bounds: bool, + threshold: float | int, + ) -> tuple[list[slice], np.ndarray, np.ndarray]: modulus = coord.units.modulus if maximum > minimum + modulus: - raise ValueError("requested range greater than coordinate's" - " unit's modulus") + raise ValueError("requested range greater than coordinate's unit's modulus") if coord.has_bounds(): values = coord.bounds else: + ignore_bounds = True values = coord.points if values.max() > values.min() + modulus: - raise ValueError("coordinate's range greater than coordinate's" - " unit's modulus") + raise ValueError( + "coordinate's range greater than coordinate's unit's modulus" + ) min_comp = np.less_equal if min_inclusive else np.less max_comp = np.less_equal if max_inclusive else np.less - if coord.has_bounds(): - bounds = wrap_lons(coord.bounds, minimum, modulus) - if ignore_bounds: - points = wrap_lons(coord.points, minimum, modulus) - inside_indices, = np.where( - np.logical_and(min_comp(minimum, points), - max_comp(points, maximum))) - else: - inside = np.logical_and(min_comp(minimum, bounds), - max_comp(bounds, maximum)) - inside_indices, = np.where(np.any(inside, axis=1)) - - # To ensure that bounds (and points) of matching cells aren't - # "scrambled" by the wrap operation we detect split cells that - # straddle the wrap point and choose a new wrap point which avoids - # split cells. - # For example: the cell [349.875, 350.4375] wrapped at -10 would - # become [349.875, -9.5625] which is no longer valid. The lower - # cell bound value (and possibly associated point) are - # recalculated so that they are consistent with the extended - # wrapping scheme which moves the wrap point to the correct lower - # bound value (-10.125) thus resulting in the cell no longer - # being split. For bounds which may extend exactly the length of - # the modulus, we simply preserve the point to bound difference, - # and call the new bounds = the new points + the difference. - pre_wrap_delta = np.diff(coord.bounds[inside_indices]) - post_wrap_delta = np.diff(bounds[inside_indices]) - close_enough = np.allclose(pre_wrap_delta, post_wrap_delta) - if not close_enough: - split_cell_indices, _ = np.where(pre_wrap_delta != - post_wrap_delta) - - # Recalculate the extended minimum. - indices = inside_indices[split_cell_indices] - cells = bounds[indices] - cells_delta = np.diff(coord.bounds[indices]) - - # Watch out for ascending/descending bounds - if cells_delta[0, 0] > 0: - cells[:, 0] = cells[:, 1] - cells_delta[:, 0] - minimum = np.min(cells[:, 0]) - else: - cells[:, 1] = cells[:, 0] + cells_delta[:, 0] - minimum = np.min(cells[:, 1]) - + if ignore_bounds: points = wrap_lons(coord.points, minimum, modulus) + bounds = coord.bounds + if bounds is not None: + # To avoid splitting any cells (by wrapping only one of its + # bounds), apply exactly the same wrapping as the points. 
+ # Note that the offsets should be exact multiples of the + # modulus, but may initially be slightly off and need rounding. + wrap_offset = points - coord.points + wrap_offset = np.round(wrap_offset / modulus) * modulus + bounds = coord.bounds + wrap_offset[:, np.newaxis] + + # Check points only + (inside_indices,) = np.where( + np.logical_and(min_comp(minimum, points), max_comp(points, maximum)) + ) - bound_diffs = coord.points[:, np.newaxis] - coord.bounds - bounds = points[:, np.newaxis] - bound_diffs else: - points = wrap_lons(coord.points, minimum, modulus) - bounds = None - inside_indices, = np.where( - np.logical_and(min_comp(minimum, points), - max_comp(points, maximum))) + # Set up slices to account for ascending/descending bounds + if coord.bounds[0, 0] < coord.bounds[0, 1]: + ilower = (slice(None), 0) + iupper = (slice(None), 1) + else: + ilower = (slice(None), 1) + iupper = (slice(None), 0) + + # Initially wrap such that upper bounds are in [min, min + modulus] + # As with the ignore_bounds case, need to round to modulus due to + # floating point precision + upper = wrap_lons(coord.bounds[iupper], minimum, modulus) + wrap_offset = upper - coord.bounds[iupper] + wrap_offset = np.round(wrap_offset / modulus) * modulus + lower = coord.bounds[ilower] + wrap_offset + + # Scale threshold for each bound + thresholds = (upper - lower) * threshold + + # For a range that covers the whole modulus, there may be a + # cell that is "split" and could appear at either side of + # the range. Choose lower, unless there is not enough overlap. + if minimum + modulus == maximum and threshold == 0: + # Special case: overlapping in a single point + # (ie `minimum` itself) is always unintuitive + is_split = np.isclose(upper, minimum) + else: + is_split = upper - minimum < thresholds + wrap_offset += is_split * modulus + + # Apply wrapping + points = coord.points + wrap_offset + bounds = coord.bounds + wrap_offset[:, np.newaxis] + + # Interval [min, max] intersects [a, b] iff min <= b and a <= max + # (or < for non-inclusive min/max respectively). + # In this case, its length is L = min(max, b) - max(min, a) + upper = bounds[iupper] + lower = bounds[ilower] + overlap = np.where( + np.logical_and(min_comp(minimum, upper), max_comp(lower, maximum)), + np.minimum(maximum, upper) - np.maximum(minimum, lower), + np.nan, + ) + (inside_indices,) = np.where(overlap >= thresholds) # Determine the subsets - subsets = self._intersect_derive_subset(coord, points, bounds, - inside_indices) + subsets = self._intersect_derive_subset(coord, points, bounds, inside_indices) return subsets, points, bounds - def _as_list_of_coords(self, names_or_coords): - """ - Convert a name, coord, or list of names/coords to a list of coords. - """ + def _as_list_of_coords(self, names_or_coords) -> list[AuxCoord | DimCoord]: + """Convert a name, coord, or list of names/coords to a list of coords.""" # If not iterable, convert to list of a single item if _is_single_item(names_or_coords): names_or_coords = [names_or_coords] coords = [] for name_or_coord in names_or_coords: - if (isinstance(name_or_coord, six.string_types) or - isinstance(name_or_coord, iris.coords.Coord)): + if isinstance(name_or_coord, str) or isinstance( + name_or_coord, (iris.coords.DimCoord, iris.coords.AuxCoord) + ): coords.append(self.coord(name_or_coord)) else: # Don't know how to handle this type - msg = ("Don't know how to handle coordinate of type %s. 
" - "Ensure all coordinates are of type six.string_types " - "or iris.coords.Coord.") % (type(name_or_coord), ) + msg = ( + "Don't know how to handle coordinate of type %s. " + "Ensure all coordinates are of type str " + "or iris.coords.Coord." + ) % (type(name_or_coord),) raise TypeError(msg) return coords - def slices_over(self, ref_to_slice): - """ + def slices_over( + self, + ref_to_slice: str + | AuxCoord + | DimCoord + | int + | Iterable[str | AuxCoord | DimCoord | int], + ) -> Iterable[Cube]: + """Return an iterator of all subcubes. + Return an iterator of all subcubes along a given coordinate or dimension index, or multiple of these. - Args: - - * ref_to_slice (string, coord, dimension index or a list of these): + Parameters + ---------- + ref_to_slice : Determines which dimensions will be iterated along (i.e. the dimensions that are not returned in the subcubes). A mix of input types can also be provided. - Returns: - An iterator of subcubes. - - For example, to get all subcubes along the time dimension:: - - for sub_cube in cube.slices_over('time'): - print(sub_cube) - - .. seealso:: :meth:`iris.cube.Cube.slices`. - + Returns + ------- + An iterator of subcubes. + + Examples + -------- + For example, for a cube with dimensions `realization`, `time`, `latitude` and + `longitude`: + + >>> fname = iris.sample_data_path('GloSea4', 'ensemble_01[01].pp') + >>> cube = iris.load_cube(fname, 'surface_temperature') + >>> print(cube.summary(shorten=True)) + surface_temperature / (K) (realization: 2; time: 6; latitude: 145; longitude: 192) + + To get all 12x2D longitude/latitude subcubes: + + >>> for sub_cube in cube.slices_over(['realization', 'time']): + ... print(sub_cube.summary(shorten=True)) + surface_temperature / (K) (latitude: 145; longitude: 192) + surface_temperature / (K) (latitude: 145; longitude: 192) + surface_temperature / (K) (latitude: 145; longitude: 192) + surface_temperature / (K) (latitude: 145; longitude: 192) + surface_temperature / (K) (latitude: 145; longitude: 192) + surface_temperature / (K) (latitude: 145; longitude: 192) + surface_temperature / (K) (latitude: 145; longitude: 192) + surface_temperature / (K) (latitude: 145; longitude: 192) + surface_temperature / (K) (latitude: 145; longitude: 192) + surface_temperature / (K) (latitude: 145; longitude: 192) + surface_temperature / (K) (latitude: 145; longitude: 192) + surface_temperature / (K) (latitude: 145; longitude: 192) + + To get realizations as 2x3D separate subcubes, using the `realization` dimension index: + + >>> for sub_cube in cube.slices_over(0): + ... print(sub_cube.summary(shorten=True)) + surface_temperature / (K) (time: 6; latitude: 145; longitude: 192) + surface_temperature / (K) (time: 6; latitude: 145; longitude: 192) + + Notes + ----- .. note:: The order of dimension references to slice along does not affect the order of returned items in the iterator; instead the ordering is based on the fastest-changing dimension. - """ + See Also + -------- + iris.cube.Cube.slices : + Return an iterator of all subcubes given the coordinates or dimension indices. + + """ # noqa: D214, D406, D407, D410, D411 # Required to handle a mix between types. 
if _is_single_item(ref_to_slice): ref_to_slice = [ref_to_slice] - slice_dims = set() - for ref in ref_to_slice: + slice_dims: set[int] = set() + for ref in ref_to_slice: # type: ignore[union-attr] try: - coord, = self._as_list_of_coords(ref) + (coord,) = self._as_list_of_coords(ref) except TypeError: - dim = int(ref) + dim = int(ref) # type: ignore[arg-type] if dim < 0 or dim > self.ndim: - msg = ('Requested an iterator over a dimension ({}) ' - 'which does not exist.'.format(dim)) + msg = ( + "Requested an iterator over a dimension ({}) " + "which does not exist.".format(dim) + ) raise ValueError(msg) # Convert coord index to a single-element list to prevent a # TypeError when `slice_dims.update` is called with it. - dims = [dim] + dims: tuple[int, ...] = (dim,) else: dims = self.coord_dims(coord) slice_dims.update(dims) @@ -2710,38 +3648,83 @@ def slices_over(self, ref_to_slice): opposite_dims = list(all_dims - slice_dims) return self.slices(opposite_dims, ordered=False) - def slices(self, ref_to_slice, ordered=True): - """ + def slices( + self, + ref_to_slice: str + | AuxCoord + | DimCoord + | int + | Iterable[str | AuxCoord | DimCoord | int], + ordered: bool = True, + ) -> Iterator[Cube]: + """Return an iterator of all subcubes given the coordinates or dimension indices. + Return an iterator of all subcubes given the coordinates or dimension indices desired to be present in each subcube. - Args: - - * ref_to_slice (string, coord, dimension index or a list of these): + Parameters + ---------- + ref_to_slice : Determines which dimensions will be returned in the subcubes (i.e. the dimensions that are not iterated over). A mix of input types can also be provided. They must all be orthogonal (i.e. point to different dimensions). + ordered : + If True, subcube dimensions are ordered to match the dimension order + in `ref_to_slice`. If False, the order will follow that of + the source cube. + + Returns + ------- + An iterator of subcubes. + + Examples + -------- + For example, for a cube with dimensions `realization`, `time`, `latitude` and + `longitude`: + + >>> fname = iris.sample_data_path('GloSea4', 'ensemble_01[01].pp') + >>> cube = iris.load_cube(fname, 'surface_temperature') + >>> print(cube.summary(shorten=True)) + surface_temperature / (K) (realization: 2; time: 6; latitude: 145; longitude: 192) + + To get all 12x2D longitude/latitude subcubes: + + >>> for sub_cube in cube.slices(['longitude', 'latitude']): + ... print(sub_cube.summary(shorten=True)) + surface_temperature / (K) (longitude: 192; latitude: 145) + surface_temperature / (K) (longitude: 192; latitude: 145) + surface_temperature / (K) (longitude: 192; latitude: 145) + surface_temperature / (K) (longitude: 192; latitude: 145) + surface_temperature / (K) (longitude: 192; latitude: 145) + surface_temperature / (K) (longitude: 192; latitude: 145) + surface_temperature / (K) (longitude: 192; latitude: 145) + surface_temperature / (K) (longitude: 192; latitude: 145) + surface_temperature / (K) (longitude: 192; latitude: 145) + surface_temperature / (K) (longitude: 192; latitude: 145) + surface_temperature / (K) (longitude: 192; latitude: 145) + surface_temperature / (K) (longitude: 192; latitude: 145) - Kwargs: - - * ordered: if True, the order which the coords to slice or data_dims - are given will be the order in which they represent the data in - the resulting cube slices. If False, the order will follow that of - the source cube. Default is True. - Returns: - An iterator of subcubes. + .. 
warning:: + Note that the dimension order returned in the sub_cubes matches the order specified + in the ``cube.slices`` call, *not* the order of the dimensions in the original cube. - For example, to get all 2d longitude/latitude subcubes from a - multi-dimensional cube:: + To get all realizations as 2x3D separate subcubes, using the `time`, `latitude` + and `longitude` dimensions' indices: - for sub_cube in cube.slices(['longitude', 'latitude']): - print(sub_cube) + >>> for sub_cube in cube.slices([1, 2, 3]): + ... print(sub_cube.summary(shorten=True)) + surface_temperature / (K) (time: 6; latitude: 145; longitude: 192) + surface_temperature / (K) (time: 6; latitude: 145; longitude: 192) - .. seealso:: :meth:`iris.cube.Cube.slices_over`. + See Also + -------- + iris.cube.Cube.slices_over : + Return an iterator of all subcubes along a given coordinate or + dimension index. - """ + """ # noqa: D214, D406, D407, D410, D411 if not isinstance(ordered, bool): raise TypeError("'ordered' argument to slices must be boolean.") @@ -2749,15 +3732,17 @@ def slices(self, ref_to_slice, ordered=True): if _is_single_item(ref_to_slice): ref_to_slice = [ref_to_slice] - dim_to_slice = [] - for ref in ref_to_slice: + dim_to_slice: list[int] = [] + for ref in ref_to_slice: # type: ignore[union-attr] try: # attempt to handle as coordinate coord = self._as_list_of_coords(ref)[0] dims = self.coord_dims(coord) if not dims: - msg = ('Requested an iterator over a coordinate ({}) ' - 'which does not describe a dimension.') + msg = ( + "Requested an iterator over a coordinate ({}) " + "which does not describe a dimension." + ) msg = msg.format(coord.name()) raise ValueError(msg) dim_to_slice.extend(dims) @@ -2765,18 +3750,21 @@ def slices(self, ref_to_slice, ordered=True): except TypeError: try: # attempt to handle as dimension index - dim = int(ref) + dim = int(ref) # type: ignore[arg-type] except ValueError: - raise ValueError('{} Incompatible type {} for ' - 'slicing'.format(ref, type(ref))) + raise ValueError( + "{} Incompatible type {} for slicing".format(ref, type(ref)) + ) if dim < 0 or dim > self.ndim: - msg = ('Requested an iterator over a dimension ({}) ' - 'which does not exist.'.format(dim)) + msg = ( + "Requested an iterator over a dimension ({}) " + "which does not exist.".format(dim) + ) raise ValueError(msg) dim_to_slice.append(dim) if len(set(dim_to_slice)) != len(dim_to_slice): - msg = 'The requested coordinates are not orthogonal.' + msg = "The requested coordinates are not orthogonal." raise ValueError(msg) # Create a list with of the shape of our data @@ -2788,20 +3776,25 @@ def slices(self, ref_to_slice, ordered=True): return _SliceIterator(self, dims_index, dim_to_slice, ordered) - def transpose(self, new_order=None): - """ - Re-order the data dimensions of the cube in-place. + def transpose(self, new_order: list[int] | None = None) -> None: + """Re-order the data dimensions of the cube in-place. - new_order - list of ints, optional - By default, reverse the dimensions, otherwise permute the - axes according to the values given. + Parameters + ---------- + new_order : + By default, reverse the dimensions, otherwise permute the + axes according to the values given. + Notes + ----- .. note:: If defined, new_order must span all of the data dimensions. 
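To make the ``ordered`` keyword documented above concrete, here is a hedged sketch; it assumes the same iris-sample-data files used throughout these docstrings are installed::

    import iris

    fname = iris.sample_data_path('GloSea4', 'ensemble_010.pp')
    cube = iris.load_cube(fname, 'surface_temperature')

    # ordered=True (the default): each subcube follows the requested
    # order, here (longitude, latitude).
    sub = next(cube.slices(['longitude', 'latitude']))
    print(sub.summary(shorten=True))

    # ordered=False: each subcube keeps the source cube's order,
    # (latitude, longitude).
    sub = next(cube.slices(['longitude', 'latitude'], ordered=False))
    print(sub.summary(shorten=True))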
- Example usage:: + Examples + -------- + :: # put the second dimension first, followed by the third dimension, - and finally put the first dimension third:: + # and finally put the first dimension third:: >>> cube.transpose([1, 2, 0]) @@ -2815,7 +3808,7 @@ def transpose(self, new_order=None): new_order = list(new_order) if len(new_order) != self.ndim: - raise ValueError('Incorrect number of dimensions.') + raise ValueError("Incorrect number of dimensions.") # Transpose the data payload. dm = self._data_manager @@ -2834,77 +3827,91 @@ def remap_cube_metadata(metadata_and_dims): dims = dim_mapping[dims] return metadata, dims - self._dim_coords_and_dims = list(map(remap_cube_metadata, - self._dim_coords_and_dims)) - self._aux_coords_and_dims = list(map(remap_cube_metadata, - self._aux_coords_and_dims)) - self._cell_measures_and_dims = list(map(remap_cube_metadata, - self._cell_measures_and_dims)) - - def xml(self, checksum=False, order=True, byteorder=True): - """ - Returns a fully valid CubeML string representation of the Cube. - - """ + self._dim_coords_and_dims = list( + map(remap_cube_metadata, self._dim_coords_and_dims) + ) + self._aux_coords_and_dims = list( + map(remap_cube_metadata, self._aux_coords_and_dims) + ) + self._cell_measures_and_dims = list( + map(remap_cube_metadata, self._cell_measures_and_dims) + ) + self._ancillary_variables_and_dims = list( + map(remap_cube_metadata, self._ancillary_variables_and_dims) + ) + + def xml( + self, + checksum: bool = False, + order: bool = True, + byteorder: bool = True, + ) -> str: + """Return a fully valid CubeML string representation of the Cube.""" doc = Document() - cube_xml_element = self._xml_element(doc, checksum=checksum, - order=order, - byteorder=byteorder) + cube_xml_element = self._xml_element( + doc, checksum=checksum, order=order, byteorder=byteorder + ) cube_xml_element.setAttribute("xmlns", XML_NAMESPACE_URI) doc.appendChild(cube_xml_element) # Print our newly created XML + doc = self._sort_xml_attrs(doc) return doc.toprettyxml(indent=" ") def _xml_element(self, doc, checksum=False, order=True, byteorder=True): cube_xml_element = doc.createElement("cube") if self.standard_name: - cube_xml_element.setAttribute('standard_name', self.standard_name) + cube_xml_element.setAttribute("standard_name", self.standard_name) if self.long_name: - cube_xml_element.setAttribute('long_name', self.long_name) + cube_xml_element.setAttribute("long_name", self.long_name) if self.var_name: - cube_xml_element.setAttribute('var_name', self.var_name) - cube_xml_element.setAttribute('units', str(self.units)) - cube_xml_element.setAttribute('dtype', self.dtype.name) + cube_xml_element.setAttribute("var_name", self.var_name) + cube_xml_element.setAttribute("units", str(self.units)) + cube_xml_element.setAttribute("dtype", self.dtype.name) if self.attributes: - attributes_element = doc.createElement('attributes') - for name in sorted(six.iterkeys(self.attributes)): - attribute_element = doc.createElement('attribute') - attribute_element.setAttribute('name', name) + attributes_element = doc.createElement("attributes") + for name in sorted(self.attributes.keys()): + attribute_element = doc.createElement("attribute") + attribute_element.setAttribute("name", name) value = self.attributes[name] # Strict check because we don't want namedtuples. 
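The ``remap_cube_metadata`` helper above re-points each coordinate's data dimensions after the axes permutation. The mapping itself is tiny and can be sketched in plain Python::

    new_order = [1, 2, 0]      # as in the transpose docstring above
    # After data.transpose(new_order), old dimension `src` ends up at
    # the position `dest` where new_order[dest] == src.
    dim_mapping = {src: dest for dest, src in enumerate(new_order)}
    print(dim_mapping)                                  # {1: 0, 2: 1, 0: 2}

    old_dims = (0, 2)          # dims a coordinate spanned before
    print(tuple(dim_mapping[d] for d in old_dims))      # (2, 1)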
if type(value) in (list, tuple): - delimiter = '[]' if isinstance(value, list) else '()' - value = ', '.join(("'%s'" - if isinstance(item, six.string_types) - else '%s') % (item, ) for item in value) + delimiter = "[]" if isinstance(value, list) else "()" + value = ", ".join( + ("'%s'" if isinstance(item, str) else "%s") % (item,) + for item in value + ) value = delimiter[0] + value + delimiter[1] else: value = str(value) - attribute_element.setAttribute('value', value) + attribute_element.setAttribute("value", value) attributes_element.appendChild(attribute_element) cube_xml_element.appendChild(attributes_element) + def dimmeta_xml_element(element, typename, dimscall): + # Make an inner xml element for a cube DimensionalMetadata element, with a + # 'datadims' property showing how it maps to the parent cube dims. + xml_element = doc.createElement(typename) + dims = list(dimscall(element)) + if dims: + xml_element.setAttribute("datadims", repr(dims)) + xml_element.appendChild(element.xml_element(doc)) + return xml_element + coords_xml_element = doc.createElement("coords") for coord in sorted(self.coords(), key=lambda coord: coord.name()): # make a "cube coordinate" element which holds the dimensions (if # appropriate) which itself will have a sub-element of the # coordinate instance itself. - cube_coord_xml_element = doc.createElement("coord") - coords_xml_element.appendChild(cube_coord_xml_element) - - dims = list(self.coord_dims(coord)) - if dims: - cube_coord_xml_element.setAttribute("datadims", repr(dims)) - - coord_xml_element = coord.xml_element(doc) - cube_coord_xml_element.appendChild(coord_xml_element) + coords_xml_element.appendChild( + dimmeta_xml_element(coord, "coord", self.coord_dims) + ) cube_xml_element.appendChild(coords_xml_element) # cell methods (no sorting!) @@ -2914,8 +3921,32 @@ def _xml_element(self, doc, checksum=False, order=True, byteorder=True): cell_methods_xml_element.appendChild(cell_method_xml_element) cube_xml_element.appendChild(cell_methods_xml_element) + # cell measures + cell_measures = sorted(self.cell_measures(), key=lambda cm: cm.name()) + if cell_measures: + # This one is an optional subelement. + cms_xml_element = doc.createElement("cellMeasures") + for cm in cell_measures: + cms_xml_element.appendChild( + dimmeta_xml_element(cm, "cell-measure", self.cell_measure_dims) + ) + cube_xml_element.appendChild(cms_xml_element) + + # ancillary variables + ancils = sorted(self.ancillary_variables(), key=lambda anc: anc.name()) + if ancils: + # This one is an optional subelement. + ancs_xml_element = doc.createElement("ancillaryVariables") + for anc in ancils: + ancs_xml_element.appendChild( + dimmeta_xml_element( + anc, "ancillary-var", self.ancillary_variable_dims + ) + ) + cube_xml_element.appendChild(ancs_xml_element) + + # data data_xml_element = doc.createElement("data") - data_xml_element.setAttribute("shape", str(self.shape)) # NB. Getting a checksum triggers any deferred loading, @@ -2927,9 +3958,9 @@ def _xml_element(self, doc, checksum=False, order=True, byteorder=True): # Ensure consistent memory layout for checksums. def normalise(data): data = np.ascontiguousarray(data) - if data.dtype.newbyteorder('<') != data.dtype: + if data.dtype.newbyteorder("<") != data.dtype: data = data.byteswap(False) - data.dtype = data.dtype.newbyteorder('<') + data.dtype = data.dtype.newbyteorder("<") return data if ma.isMaskedArray(data): @@ -2937,17 +3968,15 @@ def normalise(data): # sensitive to unused numbers. 
Use a fixed value so # a change in fill_value doesn't affect the # checksum. - crc = '0x%08x' % ( - zlib.crc32(normalise(data.filled(0))) & 0xffffffff, ) + crc = "0x%08x" % (zlib.crc32(normalise(data.filled(0))) & 0xFFFFFFFF,) data_xml_element.setAttribute("checksum", crc) if ma.is_masked(data): - crc = '0x%08x' % ( - zlib.crc32(normalise(data.mask)) & 0xffffffff, ) + crc = "0x%08x" % (zlib.crc32(normalise(data.mask)) & 0xFFFFFFFF,) else: - crc = 'no-masked-elements' + crc = "no-masked-elements" data_xml_element.setAttribute("mask_checksum", crc) else: - crc = '0x%08x' % (zlib.crc32(normalise(data)) & 0xffffffff, ) + crc = "0x%08x" % (zlib.crc32(normalise(data)) & 0xFFFFFFFF,) data_xml_element.setAttribute("checksum", crc) elif self.has_lazy_data(): data_xml_element.setAttribute("state", "deferred") @@ -2961,55 +3990,54 @@ def normalise(data): dtype = data.dtype def _order(array): - order = '' - if array.flags['C_CONTIGUOUS']: - order = 'C' - elif array.flags['F_CONTIGUOUS']: - order = 'F' + order = "" + if array.flags["C_CONTIGUOUS"]: + order = "C" + elif array.flags["F_CONTIGUOUS"]: + order = "F" return order + if order: - data_xml_element.setAttribute('order', _order(data)) + data_xml_element.setAttribute("order", _order(data)) # NB. dtype.byteorder can return '=', which is bad for # cross-platform consistency - so we use dtype.str # instead. if byteorder: - array_byteorder = {'>': 'big', '<': 'little'}.get(dtype.str[0]) + array_byteorder = {">": "big", "<": "little"}.get(dtype.str[0]) if array_byteorder is not None: - data_xml_element.setAttribute('byteorder', array_byteorder) + data_xml_element.setAttribute("byteorder", array_byteorder) if order and ma.isMaskedArray(data): - data_xml_element.setAttribute('mask_order', - _order(data.mask)) + data_xml_element.setAttribute("mask_order", _order(data.mask)) else: dtype = self.lazy_data().dtype - data_xml_element.setAttribute('dtype', dtype.name) + data_xml_element.setAttribute("dtype", dtype.name) cube_xml_element.appendChild(data_xml_element) return cube_xml_element - def copy(self, data=None): - """ - Returns a deep copy of this cube. - - Kwargs: + def copy(self, data: np.typing.ArrayLike | None = None) -> Cube: + """Return a deep copy of this cube. - * data: + Parameters + ---------- + data : Replace the data of the cube copy with provided data payload. - Returns: - A copy instance of the :class:`Cube`. + Returns + ------- + A copy instance of the :class:`Cube`. """ - memo = {} + memo: dict[int, Any] = {} cube = self._deepcopy(memo, data=data) return cube def __copy__(self): """Shallow copying is disallowed for Cubes.""" - raise copy.Error("Cube shallow-copy not allowed. Use deepcopy() or " - "Cube.copy()") + raise copy.Error("Cube shallow-copy not allowed. Use deepcopy() or Cube.copy()") def __deepcopy__(self, memo): return self._deepcopy(memo) @@ -3019,22 +4047,32 @@ def _deepcopy(self, memo, data=None): new_dim_coords_and_dims = deepcopy(self._dim_coords_and_dims, memo) new_aux_coords_and_dims = deepcopy(self._aux_coords_and_dims, memo) + new_cell_measures_and_dims = deepcopy(self._cell_measures_and_dims, memo) + new_ancillary_variables_and_dims = deepcopy( + self._ancillary_variables_and_dims, memo + ) # Record a mapping from old coordinate IDs to new coordinates, # for subsequent use in creating updated aux_factories. 
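The ``normalise`` helper above forces a contiguous, little-endian buffer so the CRC32 checksums written into CubeML agree across platforms and byte orders. A standalone sketch of the same idea, using a dtype view rather than the in-place dtype assignment::

    import zlib
    import numpy as np

    def normalise(data):
        data = np.ascontiguousarray(data)
        if data.dtype.newbyteorder("<") != data.dtype:
            # Swap the bytes, then reinterpret as little-endian.
            data = data.byteswap(False).view(data.dtype.newbyteorder("<"))
        return data

    big = np.arange(4, dtype=">f8")     # big-endian copy of the values
    little = np.arange(4, dtype="<f8")  # little-endian copy
    crc = lambda a: "0x%08x" % (zlib.crc32(normalise(a)) & 0xFFFFFFFF)
    print(crc(big) == crc(little))      # True: byte order is normalised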
coord_mapping = {} - for old_pair, new_pair in zip(self._dim_coords_and_dims, - new_dim_coords_and_dims): + for old_pair, new_pair in zip( + self._dim_coords_and_dims, new_dim_coords_and_dims + ): coord_mapping[id(old_pair[0])] = new_pair[0] - for old_pair, new_pair in zip(self._aux_coords_and_dims, - new_aux_coords_and_dims): + for old_pair, new_pair in zip( + self._aux_coords_and_dims, new_aux_coords_and_dims + ): coord_mapping[id(old_pair[0])] = new_pair[0] - new_cube = Cube(dm.core_data(), - dim_coords_and_dims=new_dim_coords_and_dims, - aux_coords_and_dims=new_aux_coords_and_dims) + new_cube = Cube( + dm.core_data(), + dim_coords_and_dims=new_dim_coords_and_dims, + aux_coords_and_dims=new_aux_coords_and_dims, + cell_measures_and_dims=new_cell_measures_and_dims, + ancillary_variables_and_dims=new_ancillary_variables_and_dims, + ) new_cube.metadata = deepcopy(self.metadata, memo) @@ -3045,6 +4083,9 @@ def _deepcopy(self, memo, data=None): # START OPERATOR OVERLOADS def __eq__(self, other): + if other is self: + return True + result = NotImplemented if isinstance(other, Cube): @@ -3052,15 +4093,44 @@ def __eq__(self, other): # having checked the metadata, now check the coordinates if result: - coord_comparison = iris.analysis.coord_comparison(self, other) + coord_compares = iris.analysis._dimensional_metadata_comparison( + self, other + ) # if there are any coordinates which are not equal - result = not (coord_comparison['not_equal'] or - coord_comparison['non_equal_data_dimension']) + result = not ( + coord_compares["not_equal"] + or coord_compares["non_equal_data_dimension"] + ) + + if result: + cm_compares = iris.analysis._dimensional_metadata_comparison( + self, other, object_get=Cube.cell_measures + ) + # if there are any cell measures which are not equal + result = not ( + cm_compares["not_equal"] or cm_compares["non_equal_data_dimension"] + ) + + if result: + av_compares = iris.analysis._dimensional_metadata_comparison( + self, other, object_get=Cube.ancillary_variables + ) + # if there are any ancillary variables which are not equal + result = not ( + av_compares["not_equal"] or av_compares["non_equal_data_dimension"] + ) # Having checked everything else, check approximate data equality. if result: - result = da.allclose(self.core_data(), - other.core_data()).compute() + # TODO: why do we use allclose() here, but strict equality in + # _DimensionalMetadata (via util.array_equal())? 
+ result = bool( + np.allclose( + self.core_data(), + other.core_data(), + equal_nan=True, + ) + ) return result # Must supply __ne__, Python does not defer to __eq__ for negative equality @@ -3093,29 +4163,48 @@ def __sub__(self, other): def __isub__(self, other): return iris.analysis.maths.subtract(self, other, in_place=True) - __mul__ = iris.analysis.maths.multiply - __rmul__ = iris.analysis.maths.multiply + def __rsub__(self, other): + return (-self) + other + + def __mul__(self, other): + return iris.analysis.maths.multiply(self, other) def __imul__(self, other): return iris.analysis.maths.multiply(self, other, in_place=True) + __rmul__ = __mul__ + __div__ = iris.analysis.maths.divide def __idiv__(self, other): return iris.analysis.maths.divide(self, other, in_place=True) - __truediv__ = iris.analysis.maths.divide + def __rdiv__(self, other): + data = 1 / self.core_data() + reciprocal = self.copy(data=data) + reciprocal.units = reciprocal.units**-1 + return iris.analysis.maths.multiply(reciprocal, other) - def __itruediv__(self, other): - return iris.analysis.maths.divide(self, other, in_place=True) + __truediv__ = __div__ + + __itruediv__ = __idiv__ + + __rtruediv__ = __rdiv__ __pow__ = iris.analysis.maths.exponentiate + + def __neg__(self): + return self.copy(data=-self.core_data()) + # END OPERATOR OVERLOADS - def collapsed(self, coords, aggregator, **kwargs): - """ - Collapse one or more dimensions over the cube given the coordinate/s - and an aggregation. + def collapsed( + self, + coords: str | AuxCoord | DimCoord | Iterable[str | AuxCoord | DimCoord], + aggregator: iris.analysis.Aggregator, + **kwargs, + ) -> Cube: + """Collapse one or more dimensions over the cube given the coordinate/s and an aggregation. Examples of aggregations that may be used include :data:`~iris.analysis.COUNT` and :data:`~iris.analysis.MAX`. @@ -3125,9 +4214,15 @@ def collapsed(self, coords, aggregator, **kwargs): sum :data:`~iris.analysis.SUM`. Weighted aggregations support an optional *weights* keyword argument. - If set, this should be supplied as an array of weights whose shape - matches the cube. Values for latitude-longitude area weights may be - calculated using :func:`iris.analysis.cartography.area_weights`. + If set, this can be supplied as an array, cube, or (names of) + :meth:`~iris.cube.Cube.coords`, :meth:`~iris.cube.Cube.cell_measures`, + or :meth:`~iris.cube.Cube.ancillary_variables`. In all cases, the + weights should be 1d (for collapsing over a 1d coordinate) or match the + shape of the cube. When weights are not given as arrays, units are + correctly handled for weighted sums, i.e., the original unit of the + cube is multiplied by the units of the weights. Values for + latitude-longitude area weights may be calculated using + :func:`iris.analysis.cartography.area_weights`. Some Iris aggregators support "lazy" evaluation, meaning that cubes resulting from this method may represent data arrays which are @@ -3136,25 +4231,22 @@ def collapsed(self, coords, aggregator, **kwargs): it will be used wherever possible when this cube's data is itself a deferred array. - Args: - - * coords (string, coord or a list of strings/coords): + Parameters + ---------- + coords : Coordinate names/coordinates over which the cube should be collapsed. - - * aggregator (:class:`iris.analysis.Aggregator`): + aggregator : Aggregator to be applied for collapse operation. - - Kwargs: - - * kwargs: + **kwargs : Aggregation function keyword arguments. - Returns: - Collapsed cube. 
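The reverse operators introduced above make mixed scalar/cube expressions work from either side; ``1 / cube``, for instance, builds a reciprocal cube (with inverted units) and then multiplies. A minimal sketch, assuming only a bare in-memory cube::

    import numpy as np
    from iris.cube import Cube

    cube = Cube(np.array([2.0, 4.0]), units="m")

    print((-cube).data)      # [-2. -4.]            (__neg__)
    print((1 - cube).data)   # [-1. -3.]            (__rsub__)
    inv = 1 / cube           # (__rtruediv__)
    print(inv.data)          # [0.5  0.25]
    print(inv.units)         # the inverse unit, e.g. m-1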
- - For example: + Returns + ------- + Collapsed cube. + Examples + -------- >>> import iris >>> import iris.analysis >>> path = iris.sample_data_path('ostia_monthly.nc') @@ -3162,22 +4254,24 @@ def collapsed(self, coords, aggregator, **kwargs): >>> new_cube = cube.collapsed('longitude', iris.analysis.MEAN) >>> print(new_cube) surface_temperature / (K) (time: 54; latitude: 18) - Dimension coordinates: - time x - - latitude - x - Auxiliary coordinates: - forecast_reference_time x - - Scalar coordinates: - forecast_period: 0 hours - longitude: 180.0 degrees, bound=(0.0, 360.0) degrees - Attributes: - Conventions: CF-1.5 - STASH: m01s00i024 - Cell methods: - mean: month, year - mean: longitude - - + Dimension coordinates: + time x - + latitude - x + Auxiliary coordinates: + forecast_reference_time x - + Scalar coordinates: + forecast_period 0 hours + longitude \ +180.0 degrees, bound=(0.0, 360.0) degrees + Cell methods: + 0 month: year: mean + 1 longitude: mean + Attributes: + Conventions 'CF-1.5' + STASH m01s00i024 + + Notes + ----- .. note:: Some aggregations are not commutative and hence the order of @@ -3205,57 +4299,64 @@ def collapsed(self, coords, aggregator, **kwargs): cube.collapsed(['latitude', 'longitude'], iris.analysis.VARIANCE) """ + # Update weights kwargs (if necessary) to handle different types of + # weights + weights_info = None + if kwargs.get("weights") is not None: + weights_info = _Weights(kwargs["weights"], self) + kwargs["weights"] = weights_info.array + # Convert any coordinate names to coordinates - coords = self._as_list_of_coords(coords) + coordinates = self._as_list_of_coords(coords) - if (isinstance(aggregator, iris.analysis.WeightedAggregator) and - not aggregator.uses_weighting(**kwargs)): + if isinstance( + aggregator, iris.analysis.WeightedAggregator + ) and not aggregator.uses_weighting(**kwargs): msg = "Collapsing spatial coordinate {!r} without weighting" - lat_match = [coord for coord in coords - if 'latitude' in coord.name()] + lat_match = [coord for coord in coordinates if "latitude" in coord.name()] if lat_match: for coord in lat_match: - warnings.warn(msg.format(coord.name())) + warnings.warn( + msg.format(coord.name()), + category=iris.warnings.IrisUserWarning, + ) # Determine the dimensions we need to collapse (and those we don't) - if aggregator.cell_method == 'peak': - dims_to_collapse = [list(self.coord_dims(coord)) - for coord in coords] - - # Remove duplicate dimensions. - new_dims = OrderedDict.fromkeys( - d for dim in dims_to_collapse for d in dim) - # Reverse the dimensions so the order can be maintained when - # reshaping the data. - dims_to_collapse = list(new_dims)[::-1] - else: - dims_to_collapse = set() - for coord in coords: - dims_to_collapse.update(self.coord_dims(coord)) + # Remove duplicate dimensions and reverse the dimensions so the order + # can be maintained when reshaping the data. + dims_to_collapse = list( + dict.fromkeys(d for coord in coordinates for d in self.coord_dims(coord)) + )[::-1] + + if aggregator.name() == "max_run" and len(dims_to_collapse) > 1: + msg = "Not possible to calculate runs over more than one dimension" + raise ValueError(msg) if not dims_to_collapse: - msg = 'Cannot collapse a dimension which does not describe any ' \ - 'data.' + msg = "Cannot collapse a dimension which does not describe any data." 
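Since ``weights`` may now be an array, a cube, or the name of a coordinate / cell measure / ancillary variable, a typical area-weighted collapse looks like the sketch below. It assumes the latitude and longitude coordinates carry bounds, as :func:`iris.analysis.cartography.area_weights` requires, and uses the sample file from the docstring above::

    import iris
    import iris.analysis
    from iris.analysis.cartography import area_weights

    cube = iris.load_cube(iris.sample_data_path('ostia_monthly.nc'))
    grid_weights = area_weights(cube)   # needs bounded lat/lon coords
    mean = cube.collapsed(['latitude', 'longitude'],
                          iris.analysis.MEAN,
                          weights=grid_weights)
    print(mean.summary(shorten=True))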
raise iris.exceptions.CoordinateCollapseError(msg) - untouched_dims = set(range(self.ndim)) - set(dims_to_collapse) + untouched_dims = sorted(set(range(self.ndim)) - set(dims_to_collapse)) + + collapsed_cube = iris.util._strip_metadata_from_dims(self, dims_to_collapse) # Remove the collapsed dimension(s) from the metadata - indices = [slice(None, None)] * self.ndim + indices: list[slice | int] = [slice(None, None)] * self.ndim for dim in dims_to_collapse: indices[dim] = 0 - collapsed_cube = self[tuple(indices)] + collapsed_cube = collapsed_cube[tuple(indices)] - # Collapse any coords that span the dimension(s) being collapsed + # Collapse any coordinates that span the dimension(s) being collapsed for coord in self.dim_coords + self.aux_coords: coord_dims = self.coord_dims(coord) if set(dims_to_collapse).intersection(coord_dims): - local_dims = [coord_dims.index(dim) for dim in - dims_to_collapse if dim in coord_dims] + local_dims = [ + coord_dims.index(dim) + for dim in dims_to_collapse + if dim in coord_dims + ] collapsed_cube.replace_coord(coord.collapsed(local_dims)) - untouched_dims = sorted(untouched_dims) - # Record the axis(s) argument passed to 'aggregation', so the same is # passed to the 'update_metadata' function. collapse_axis = -1 @@ -3263,33 +4364,36 @@ def collapsed(self, coords, aggregator, **kwargs): data_result = None # Perform the actual aggregation. - if aggregator.cell_method == 'peak': + if aggregator.cell_method == "peak": # The PEAK aggregator must collapse each coordinate separately. untouched_shape = [self.shape[d] for d in untouched_dims] collapsed_shape = [self.shape[d] for d in dims_to_collapse] new_shape = untouched_shape + collapsed_shape array_dims = untouched_dims + dims_to_collapse - unrolled_data = np.transpose( - self.data, array_dims).reshape(new_shape) + unrolled_data = np.transpose(self.data, array_dims).reshape(new_shape) for dim in dims_to_collapse: - unrolled_data = aggregator.aggregate(unrolled_data, - axis=-1, - **kwargs) + unrolled_data = aggregator.aggregate(unrolled_data, axis=-1, **kwargs) data_result = unrolled_data # Perform the aggregation in lazy form if possible. - elif (aggregator.lazy_func is not None and self.has_lazy_data()): + elif aggregator.lazy_func is not None and self.has_lazy_data(): # Use a lazy operation separately defined by the aggregator, based # on the cube lazy array. # NOTE: do not reform the data in this case, as 'lazy_aggregate' # accepts multiple axes (unlike 'aggregate'). - collapse_axis = list(dims_to_collapse) + if len(dims_to_collapse) == 1: + # Replace a "list of 1 axes" with just a number : This single-axis form is *required* by functions + # like da.average (and np.average), if a 1d weights array is specified. + collapse_axes: int | list[int] = dims_to_collapse[0] + else: + collapse_axes = list(dims_to_collapse) + try: - data_result = aggregator.lazy_aggregate(self.lazy_data(), - axis=collapse_axis, - **kwargs) + data_result = aggregator.lazy_aggregate( + self.lazy_data(), axis=collapse_axes, **kwargs + ) except TypeError: # TypeError - when unexpected keywords passed through (such as # weights to mean) @@ -3303,59 +4407,86 @@ def collapsed(self, coords, aggregator, **kwargs): # over are grouped 'at the end' (i.e. axis=-1). 
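The non-lazy path that follows relies on a classic numpy trick: permute the collapsed dimensions to the end, flatten them into one trailing axis, and hand the aggregator ``axis=-1``. Sketched standalone::

    import numpy as np

    data = np.arange(24.0).reshape(2, 3, 4)
    dims_to_collapse = [0, 2]
    untouched_dims = [1]

    end_size = int(np.prod([data.shape[d] for d in dims_to_collapse]))
    new_shape = [data.shape[d] for d in untouched_dims] + [end_size]
    unrolled = np.transpose(
        data, untouched_dims + dims_to_collapse
    ).reshape(new_shape)

    print(unrolled.shape)                                   # (3, 8)
    print(np.allclose(unrolled.mean(axis=-1),
                      data.mean(axis=(0, 2))))              # True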
dims_to_collapse = sorted(dims_to_collapse) - end_size = reduce(operator.mul, (self.shape[dim] for dim in - dims_to_collapse)) + end_size = reduce( + operator.mul, (self.shape[dim] for dim in dims_to_collapse) + ) untouched_shape = [self.shape[dim] for dim in untouched_dims] new_shape = untouched_shape + [end_size] dims = untouched_dims + dims_to_collapse unrolled_data = np.transpose(self.data, dims).reshape(new_shape) # Perform the same operation on the weights if applicable - if kwargs.get("weights") is not None: - weights = kwargs["weights"].view() - kwargs["weights"] = np.transpose(weights, - dims).reshape(new_shape) - - data_result = aggregator.aggregate(unrolled_data, - axis=-1, - **kwargs) - aggregator.update_metadata(collapsed_cube, coords, axis=collapse_axis, - **kwargs) - result = aggregator.post_process(collapsed_cube, data_result, coords, - **kwargs) + weights = kwargs.get("weights") + if weights is not None and weights.ndim > 1: + # Note: *don't* adjust 1d weights arrays, these have a special meaning for statistics functions. + weights = weights.view() + kwargs["weights"] = np.transpose(weights, dims).reshape(new_shape) + + data_result = aggregator.aggregate(unrolled_data, axis=-1, **kwargs) + + aggregator.update_metadata( + collapsed_cube, + coordinates, + axis=collapse_axis, + _weights_units=getattr(weights_info, "units", None), + **kwargs, + ) + result = aggregator.post_process( + collapsed_cube, data_result, coordinates, **kwargs + ) return result - def aggregated_by(self, coords, aggregator, **kwargs): - """ - Perform aggregation over the cube given one or more "group - coordinates". + def aggregated_by( + self, + coords: str | AuxCoord | DimCoord | Iterable[str | AuxCoord | DimCoord], + aggregator: iris.analysis.Aggregator, + climatological: bool = False, + **kwargs, + ) -> Cube: + """Perform aggregation over the cube given one or more "group coordinates". A "group coordinate" is a coordinate where repeating values represent a - single group, such as a month coordinate on a daily time slice. - Repeated values will form a group even if they are not consecutive. + single group, such as a month coordinate on a daily time slice. Repeated + values will form a group even if they are not consecutive. The group coordinates must all be over the same cube dimension. Each common value group identified over all the group-by coordinates is collapsed using the provided aggregator. - Args: + Weighted aggregations (:class:`iris.analysis.WeightedAggregator`) may + also be supplied. These include :data:`~iris.analysis.MEAN` and + :data:`~iris.analysis.SUM`. - * coords (list of coord names or :class:`iris.coords.Coord` instances): + Weighted aggregations support an optional *weights* keyword argument. + If set, this can be supplied as an array, cube, or (names of) + :meth:`~iris.cube.Cube.coords`, :meth:`~iris.cube.Cube.cell_measures`, + or :meth:`~iris.cube.Cube.ancillary_variables`. In all cases, the + weights should be 1d or match the shape of the cube. When weights are + not given as arrays, units are correctly handled for weighted sums, + i.e., the original unit of the cube is multiplied by the units of the + weights. + + Parameters + ---------- + coords : One or more coordinates over which group aggregation is to be performed. - * aggregator (:class:`iris.analysis.Aggregator`): + aggregator : Aggregator to be applied to each group. - - Kwargs: - - * kwargs: + climatological : + Indicates whether the output is expected to be climatological. 
For + any aggregated time coord(s), this causes the climatological flag to + be set and the point for each cell to equal its first bound, thereby + preserving the time of year. + **kwargs : Aggregator and aggregation function keyword arguments. - Returns: - :class:`iris.cube.Cube`. - - For example: + Returns + ------- + :class:`iris.cube.Cube` + Examples + -------- >>> import iris >>> import iris.analysis >>> import iris.coord_categorisation as cat @@ -3366,258 +4497,354 @@ def aggregated_by(self, coords, aggregator, **kwargs): >>> print(new_cube) surface_temperature / (K) \ (time: 5; latitude: 18; longitude: 432) - Dimension coordinates: - time \ - x - - - latitude \ - - x - - longitude \ - - - x - Auxiliary coordinates: - forecast_reference_time \ - x - - - year \ - x - - - Scalar coordinates: - forecast_period: 0 hours - Attributes: - Conventions: CF-1.5 - STASH: m01s00i024 - Cell methods: - mean: month, year - mean: year + Dimension coordinates: + time \ +x - - + latitude \ +- x - + longitude \ +- - x + Auxiliary coordinates: + forecast_reference_time \ +x - - + year \ +x - - + Scalar coordinates: + forecast_period 0 hours + Cell methods: + 0 month: year: mean + 1 year: mean + Attributes: + Conventions 'CF-1.5' + STASH m01s00i024 """ - groupby_coords = [] - dimension_to_groupby = None + # Update weights kwargs (if necessary) to handle different types of + # weights + weights_info = None + if kwargs.get("weights") is not None: + weights_info = _Weights(kwargs["weights"], self) + kwargs["weights"] = weights_info.array - # We can't handle weights - if isinstance(aggregator, iris.analysis.WeightedAggregator) and \ - aggregator.uses_weighting(**kwargs): - raise ValueError('Invalid Aggregation, aggregated_by() cannot use' - ' weights.') + groupby_coords = [] + dimension_to_groupby: int | None = None - coords = self._as_list_of_coords(coords) - for coord in sorted(coords, key=lambda coord: coord._as_defn()): + coordinates = self._as_list_of_coords(coords) + for coord in sorted(coordinates, key=lambda coord: coord.metadata): if coord.ndim > 1: - msg = 'Cannot aggregate_by coord %s as it is ' \ - 'multidimensional.' % coord.name() + msg = ( + "Cannot aggregate_by coord %s as it is " + "multidimensional." % coord.name() + ) raise iris.exceptions.CoordinateMultiDimError(msg) - dimension = self.coord_dims(coord) - if not dimension: - msg = 'Cannot group-by the coordinate "%s", as its ' \ - 'dimension does not describe any data.' % coord.name() + dimensions = self.coord_dims(coord) + if not dimensions: + msg = ( + 'Cannot group-by the coordinate "%s", as its ' + "dimension does not describe any data." % coord.name() + ) raise iris.exceptions.CoordinateCollapseError(msg) if dimension_to_groupby is None: - dimension_to_groupby = dimension[0] - if dimension_to_groupby != dimension[0]: - msg = 'Cannot group-by coordinates over different dimensions.' + dimension_to_groupby = dimensions[0] + if dimension_to_groupby != dimensions[0]: + msg = "Cannot group-by coordinates over different dimensions." raise iris.exceptions.CoordinateCollapseError(msg) groupby_coords.append(coord) + if dimension_to_groupby is None: + raise ValueError("Unable to aggregate by an empty list of `coords`.") + + # Check shape of weights. These must either match the shape of the cube + # or be 1D (in this case, their length must be equal to the length of the + # dimension we are aggregating over). 
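As the comment above says, 1d weights are stretched to the full cube shape along the group-by dimension (via :func:`iris.util.broadcast_to_shape`) before any grouping happens. A small sketch::

    import numpy as np
    from iris.util import broadcast_to_shape

    cube_shape = (4, 3)                        # e.g. (time, latitude)
    dimension_to_groupby = 0
    weights = np.array([1.0, 2.0, 1.0, 2.0])   # one weight per time point

    full = broadcast_to_shape(weights, cube_shape, (dimension_to_groupby,))
    print(full.shape)     # (4, 3)
    print(full[:, 0])     # [1. 2. 1. 2.] -- repeated along latitude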
+ weights = kwargs.get("weights") + return_weights = kwargs.get("returned", False) + if weights is not None: + if weights.ndim == 1: + if len(weights) != self.shape[dimension_to_groupby]: + raise ValueError( + f"1D weights must have the same length as the dimension " + f"that is aggregated, got {len(weights):d}, expected " + f"{self.shape[dimension_to_groupby]:d}" + ) + weights = iris.util.broadcast_to_shape( + weights, + self.shape, + (dimension_to_groupby,), + ) + if weights.shape != self.shape: + raise ValueError( + f"Weights must either be 1D or have the same shape as the " + f"cube, got shape {weights.shape} for weights, " + f"{self.shape} for cube" + ) + # Determine the other coordinates that share the same group-by # coordinate dimension. - shared_coords = list(filter( - lambda coord_: coord_ not in groupby_coords, - self.coords(contains_dimension=dimension_to_groupby))) + shared_coords = list( + filter( + lambda coord_: coord_ not in groupby_coords + and dimension_to_groupby in self.coord_dims(coord_), + self.dim_coords + self.aux_coords, + ) + ) # Determine which of each shared coord's dimensions will be aggregated. shared_coords_and_dims = [ (coord_, index) for coord_ in shared_coords for (index, dim) in enumerate(self.coord_dims(coord_)) - if dim == dimension_to_groupby] + if dim == dimension_to_groupby + ] # Create the aggregation group-by instance. - groupby = iris.analysis._Groupby(groupby_coords, - shared_coords_and_dims) + groupby = iris.analysis._Groupby( + groupby_coords, + shared_coords_and_dims, + climatological=climatological, + ) # Create the resulting aggregate-by cube and remove the original # coordinates that are going to be groupedby. - key = [slice(None, None)] * self.ndim + aggregateby_cube = iris.util._strip_metadata_from_dims( + self, [dimension_to_groupby] + ) + key: list[slice | tuple[int, ...]] = [slice(None, None)] * self.ndim # Generate unique index tuple key to maintain monotonicity. key[dimension_to_groupby] = tuple(range(len(groupby))) - key = tuple(key) - aggregateby_cube = self[key] + aggregateby_cube = aggregateby_cube[tuple(key)] for coord in groupby_coords + shared_coords: aggregateby_cube.remove_coord(coord) + coord_mapping = {} + for coord in aggregateby_cube.coords(): + orig_id = id(self.coord(coord)) + coord_mapping[orig_id] = coord + # Determine the group-by cube data shape. data_shape = list(self.shape + aggregator.aggregate_shape(**kwargs)) data_shape[dimension_to_groupby] = len(groupby) - # Aggregate the group-by data. - if (aggregator.lazy_func is not None and self.has_lazy_data()): - front_slice = (slice(None, None),) * dimension_to_groupby - back_slice = (slice(None, None),) * (len(data_shape) - - dimension_to_groupby - - 1) - groupby_subcubes = map( - lambda groupby_slice: - self[front_slice + (groupby_slice,) + back_slice].lazy_data(), - groupby.group() + # Choose appropriate data and functions for data aggregation. + if aggregator.lazy_func is not None and self.has_lazy_data(): + input_data = self.lazy_data() + agg_method = aggregator.lazy_aggregate + else: + input_data = self.data + agg_method = aggregator.aggregate + + # Create data and weights slices. 
+ front_slice = (slice(None),) * dimension_to_groupby + back_slice = (slice(None),) * (len(data_shape) - dimension_to_groupby - 1) + + groupby_subarrs = ( + iris.util._slice_data_with_keys( + input_data, front_slice + (groupby_slice,) + back_slice + )[1] + for groupby_slice in groupby.group() + ) + + if weights is not None: + groupby_subweights = ( + weights[front_slice + (groupby_slice,) + back_slice] + for groupby_slice in groupby.group() ) - agg = partial(aggregator.lazy_aggregate, - axis=dimension_to_groupby, - **kwargs) - result = list(map(agg, groupby_subcubes)) - aggregateby_data = da.stack(result, axis=dimension_to_groupby) else: - cube_slice = [slice(None, None)] * len(data_shape) - for i, groupby_slice in enumerate(groupby.group()): - # Slice the cube with the group-by slice to create a group-by - # sub-cube. - cube_slice[dimension_to_groupby] = groupby_slice - groupby_sub_cube = self[tuple(cube_slice)] - # Perform the aggregation over the group-by sub-cube and - # repatriate the aggregated data into the aggregate-by - # cube data. - cube_slice[dimension_to_groupby] = i - result = aggregator.aggregate(groupby_sub_cube.data, - axis=dimension_to_groupby, - **kwargs) - - # Determine aggregation result data type for the aggregate-by - # cube data on first pass. - if i == 0: - if ma.isMaskedArray(self.data): - aggregateby_data = ma.zeros(data_shape, - dtype=result.dtype) - else: - aggregateby_data = np.zeros(data_shape, - dtype=result.dtype) - aggregateby_data[tuple(cube_slice)] = result + groupby_subweights = (None for _ in range(len(groupby))) + + # Aggregate data slices. + agg = iris.analysis.create_weighted_aggregator_fn( + agg_method, axis=dimension_to_groupby, **kwargs + ) + result = tuple(map(agg, groupby_subarrs, groupby_subweights)) + + # If weights are returned, "result" is a list of tuples (each tuple + # contains two elements; the first is the aggregated data, the + # second is the aggregated weights). Convert these to two lists + # (one for the aggregated data and one for the aggregated weights) + # before combining the different slices. + if return_weights: + data_result, weights_result = list(zip(*result)) + aggregateby_weights = _lazy.stack(weights_result, axis=dimension_to_groupby) + else: + data_result = result + aggregateby_weights = None + + aggregateby_data = _lazy.stack(data_result, axis=dimension_to_groupby) + # Ensure plain ndarray is output if plain ndarray was input. + if ma.isMaskedArray(aggregateby_data) and not ma.isMaskedArray(input_data): + aggregateby_data = ma.getdata(aggregateby_data) # Add the aggregation meta data to the aggregate-by cube. - aggregator.update_metadata(aggregateby_cube, - groupby_coords, - aggregate=True, **kwargs) + aggregator.update_metadata( + aggregateby_cube, + groupby_coords, + aggregate=True, + _weights_units=getattr(weights_info, "units", None), + **kwargs, + ) # Replace the appropriate coordinates within the aggregate-by cube. - dim_coord, = self.coords(dimensions=dimension_to_groupby, - dim_coords=True) or [None] + dim_coords = self.coords(dimensions=dimension_to_groupby, dim_coords=True) + dim_coord = dim_coords[0] if dim_coords else None + for coord in groupby.coords: - if dim_coord is not None and \ - dim_coord._as_defn() == coord._as_defn() and \ - isinstance(coord, iris.coords.DimCoord): - aggregateby_cube.add_dim_coord(coord.copy(), - dimension_to_groupby) + new_coord = coord.copy() + + # The metadata may have changed (e.g. 
climatology), so check if + # there's a better coord to pass to self.coord_dims + lookup_coord = coord + for ( + cube_coord, + groupby_coord, + ) in groupby.coord_replacement_mapping: + if coord == groupby_coord: + lookup_coord = cube_coord + + if ( + dim_coord is not None + and dim_coord.metadata == lookup_coord.metadata + and isinstance(coord, iris.coords.DimCoord) + ): + aggregateby_cube.add_dim_coord(new_coord, dimension_to_groupby) else: - aggregateby_cube.add_aux_coord(coord.copy(), - self.coord_dims(coord)) + aggregateby_cube.add_aux_coord(new_coord, self.coord_dims(lookup_coord)) + coord_mapping[id(self.coord(lookup_coord))] = new_coord + + aggregateby_cube._aux_factories = [] + for factory in self.aux_factories: + aggregateby_cube.add_aux_factory(factory.updated(coord_mapping)) # Attach the aggregate-by data into the aggregate-by cube. - aggregateby_cube = aggregator.post_process(aggregateby_cube, - aggregateby_data, - coords, **kwargs) + if aggregateby_weights is None: + data_result = aggregateby_data + else: + data_result = (aggregateby_data, aggregateby_weights) + aggregateby_cube = aggregator.post_process( + aggregateby_cube, data_result, coordinates, **kwargs + ) return aggregateby_cube - def rolling_window(self, coord, aggregator, window, **kwargs): - """ + def rolling_window( + self, + coord: str | AuxCoord | DimCoord, + aggregator: iris.analysis.Aggregator, + window: int, + **kwargs, + ) -> Cube: + """Perform rolling window aggregation on a cube. + Perform rolling window aggregation on a cube given a coordinate, an aggregation method and a window size. - Args: - - * coord (string/:class:`iris.coords.Coord`): + Parameters + ---------- + coord : The coordinate over which to perform the rolling window aggregation. - * aggregator (:class:`iris.analysis.Aggregator`): + aggregator : Aggregator to be applied to the data. - * window (int): + window : Size of window to use. - - Kwargs: - - * kwargs: + **kwargs : Aggregator and aggregation function keyword arguments. The weights - argument to the aggregator, if any, should be a 1d array with the - same length as the chosen window. - - Returns: - :class:`iris.cube.Cube`. - - .. note:: - - This operation does not yet have support for lazy evaluation. - - For example: - + argument to the aggregator, if any, should be a 1d array, cube, or + (names of) :meth:`~iris.cube.Cube.coords`, + :meth:`~iris.cube.Cube.cell_measures`, or + :meth:`~iris.cube.Cube.ancillary_variables` with the same length as + the chosen window. + + Returns + ------- + :class:`iris.cube.Cube`. 
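Tying the docstring's weights rules together for rolling windows: the weights must be 1d with the same length as the window, and they are broadcast along the new window axis internally. A hedged usage sketch, again assuming the iris-sample-data file from the examples::

    import numpy as np
    import iris
    import iris.analysis

    fname = iris.sample_data_path('GloSea4', 'ensemble_010.pp')
    cube = iris.load_cube(fname, 'surface_temperature')

    # A weighted 3-point running mean over time.
    weights = np.array([0.25, 0.5, 0.25])   # len(weights) == window
    smoothed = cube.rolling_window('time', iris.analysis.MEAN, 3,
                                   weights=weights)
    print(smoothed.summary(shorten=True))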
+ + Examples + -------- >>> import iris, iris.analysis >>> fname = iris.sample_data_path('GloSea4', 'ensemble_010.pp') - >>> air_press = iris.load_cube(fname, 'surface_temperature') - >>> print(air_press) + >>> cube = iris.load_cube(fname, 'surface_temperature') + >>> print(cube) surface_temperature / (K) \ (time: 6; latitude: 145; longitude: 192) - Dimension coordinates: - time \ - x - - - latitude \ - - x - - longitude \ - - - x - Auxiliary coordinates: - forecast_period \ - x - - - Scalar coordinates: - forecast_reference_time: 2011-07-23 00:00:00 - realization: 10 - Attributes: - STASH: m01s00i024 - source: Data from Met Office Unified Model - um_version: 7.6 - Cell methods: - mean: time (1 hour) - - - >>> print(air_press.rolling_window('time', iris.analysis.MEAN, 3)) + Dimension coordinates: + time \ +x - - + latitude \ +- x - + longitude \ +- - x + Auxiliary coordinates: + forecast_period \ +x - - + Scalar coordinates: + forecast_reference_time 2011-07-23 00:00:00 + realization 10 + Cell methods: + 0 time: mean (interval: 1 hour) + Attributes: + STASH m01s00i024 + source \ +'Data from Met Office Unified Model' + um_version '7.6' + + >>> print(cube.rolling_window('time', iris.analysis.MEAN, 3)) surface_temperature / (K) \ (time: 4; latitude: 145; longitude: 192) - Dimension coordinates: - time \ - x - - - latitude \ - - x - - longitude \ - - - x - Auxiliary coordinates: - forecast_period \ - x - - - Scalar coordinates: - forecast_reference_time: 2011-07-23 00:00:00 - realization: 10 - Attributes: - STASH: m01s00i024 - source: Data from Met Office Unified Model - um_version: 7.6 - Cell methods: - mean: time (1 hour) - mean: time - + Dimension coordinates: + time \ +x - - + latitude \ +- x - + longitude \ +- - x + Auxiliary coordinates: + forecast_period \ +x - - + Scalar coordinates: + forecast_reference_time 2011-07-23 00:00:00 + realization 10 + Cell methods: + 0 time: mean (interval: 1 hour) + 1 time: mean + Attributes: + STASH m01s00i024 + source \ +'Data from Met Office Unified Model' + um_version '7.6' Notice that the forecast_period dimension now represents the 4 possible windows of size 3 from the original cube. - """ + """ # noqa: D214, D406, D407, D410, D411 + # Update weights kwargs (if necessary) to handle different types of + # weights + weights_info = None + if kwargs.get("weights") is not None: + weights_info = _Weights(kwargs["weights"], self) + kwargs["weights"] = weights_info.array + coord = self._as_list_of_coords(coord)[0] - if getattr(coord, 'circular', False): + if getattr(coord, "circular", False): raise iris.exceptions.NotYetImplementedError( - 'Rolling window over a circular coordinate.') + "Rolling window over a circular coordinate." + ) if window < 2: - raise ValueError('Cannot perform rolling window ' - 'with a window size less than 2.') + raise ValueError( + "Cannot perform rolling window with a window size less than 2." + ) if coord.ndim > 1: raise iris.exceptions.CoordinateMultiDimError(coord) - dimension = self.coord_dims(coord) - if len(dimension) != 1: + dimensions = self.coord_dims(coord) + if len(dimensions) != 1: raise iris.exceptions.CoordinateCollapseError( 'Cannot perform rolling window with coordinate "%s", ' - 'must map to one data dimension.' % coord.name()) - dimension = dimension[0] + "must map to one data dimension." % coord.name() + ) + dimension = dimensions[0] # Use indexing to get a result-cube of the correct shape. # NB. This indexes the data array which is wasted work. 
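The implementation below builds a strided *view* via ``iris.util.rolling_window``, adding a window axis at ``dimension + 1`` without copying data. Modern numpy ships the same building block, ``sliding_window_view``; for a 1-d axis the two agree (axis placement differs for multi-dimensional arrays)::

    import numpy as np
    from numpy.lib.stride_tricks import sliding_window_view

    data = np.arange(6.0)                  # a single 'time' dimension
    window = 3
    view = sliding_window_view(data, window, axis=0)
    print(view.shape)                      # (4, 3): 4 windows of length 3
    print(view.mean(axis=-1))              # [1. 2. 3. 4.] -- running mean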
@@ -3625,37 +4852,42 @@ def rolling_window(self, coord, aggregator, window, **kwargs): # some sort of `cube.prepare()` method would be handy to allow # re-shaping with given data, and returning a mapping of # old-to-new-coords (to avoid having to use metadata identity)? + new_cube = iris.util._strip_metadata_from_dims(self, [dimension]) key = [slice(None, None)] * self.ndim key[dimension] = slice(None, self.shape[dimension] - window + 1) - new_cube = self[tuple(key)] + new_cube = new_cube[tuple(key)] # take a view of the original data using the rolling_window function # this will add an extra dimension to the data at dimension + 1 which # represents the rolled window (i.e. will have a length of window) - rolling_window_data = iris.util.rolling_window(self.data, - window=window, - axis=dimension) + rolling_window_data = iris.util.rolling_window( + self.core_data(), window=window, axis=dimension + ) # now update all of the coordinates to reflect the aggregation for coord_ in self.coords(dimensions=dimension): if coord_.has_bounds(): - warnings.warn('The bounds of coordinate %r were ignored in ' - 'the rolling window operation.' % coord_.name()) + warnings.warn( + "The bounds of coordinate %r were ignored in " + "the rolling window operation." % coord_.name(), + category=iris.warnings.IrisIgnoringBoundsWarning, + ) if coord_.ndim != 1: - raise ValueError('Cannot calculate the rolling ' - 'window of %s as it is a multidimensional ' - 'coordinate.' % coord_.name()) + raise ValueError( + "Cannot calculate the rolling " + "window of %s as it is a multidimensional " + "coordinate." % coord_.name() + ) - new_bounds = iris.util.rolling_window(coord_.points, window) + new_bounds = iris.util.rolling_window(coord_.core_points(), window) if np.issubdtype(new_bounds.dtype, np.str_): # Handle case where the AuxCoord contains string. The points # are the serialized form of the points contributing to each # window and the bounds are the first and last points in the # window as with numeric coordinates. - new_points = np.apply_along_axis(lambda x: '|'.join(x), -1, - new_bounds) + new_points = np.apply_along_axis(lambda x: "|".join(x), -1, new_bounds) new_bounds = new_bounds[:, (0, -1)] else: # Take the first and last element of the rolled window (i.e. @@ -3671,63 +4903,79 @@ def rolling_window(self, coord, aggregator, window, **kwargs): # update the metadata of the cube itself aggregator.update_metadata( - new_cube, [coord], - action='with a rolling window of length %s over' % window, - **kwargs) + new_cube, + [coord], + action="with a rolling window of length %s over" % window, + _weights_units=getattr(weights_info, "units", None), + **kwargs, + ) # and perform the data transformation, generating weights first if # needed - if isinstance(aggregator, iris.analysis.WeightedAggregator) and \ - aggregator.uses_weighting(**kwargs): - if 'weights' in kwargs: - weights = kwargs['weights'] + if isinstance( + aggregator, iris.analysis.WeightedAggregator + ) and aggregator.uses_weighting(**kwargs): + if "weights" in kwargs: + weights = kwargs["weights"] if weights.ndim > 1 or weights.shape[0] != window: - raise ValueError('Weights for rolling window aggregation ' - 'must be a 1d array with the same length ' - 'as the window.') + raise ValueError( + "Weights for rolling window aggregation " + "must be a 1d array with the same length " + "as the window." 
+ ) kwargs = dict(kwargs) - kwargs['weights'] = iris.util.broadcast_to_shape( - weights, rolling_window_data.shape, (dimension + 1,)) - data_result = aggregator.aggregate(rolling_window_data, - axis=dimension + 1, - **kwargs) - result = aggregator.post_process(new_cube, data_result, [coord], - **kwargs) + kwargs["weights"] = iris.util.broadcast_to_shape( + weights, rolling_window_data.shape, (dimension + 1,) + ) + + if aggregator.lazy_func is not None and self.has_lazy_data(): + agg_method = aggregator.lazy_aggregate + else: + agg_method = aggregator.aggregate + data_result = agg_method(rolling_window_data, axis=dimension + 1, **kwargs) + result = aggregator.post_process(new_cube, data_result, [coord], **kwargs) return result - def interpolate(self, sample_points, scheme, collapse_scalar=True): - """ + def interpolate( + self, + sample_points: Iterable[tuple[AuxCoord | DimCoord | str, np.typing.ArrayLike]], + scheme: iris.analysis.InterpolationScheme, + collapse_scalar: bool = True, + ) -> Cube: + """Interpolate from this :class:`~iris.cube.Cube` to the given sample points. + Interpolate from this :class:`~iris.cube.Cube` to the given sample points using the given interpolation scheme. - Args: - - * sample_points: + Parameters + ---------- + sample_points : A sequence of (coordinate, points) pairs over which to interpolate. The values for coordinates that correspond to dates or times may optionally be supplied as datetime.datetime or cftime.datetime instances. - * scheme: - The type of interpolation to use to interpolate from this + The N pairs supplied will be used to create an N-d grid of points + that will then be sampled (rather than just N points). + scheme : + An instance of the type of interpolation to use to interpolate from this :class:`~iris.cube.Cube` to the given sample points. The interpolation schemes currently available in Iris are: - * :class:`iris.analysis.Linear`, and - * :class:`iris.analysis.Nearest`. - - Kwargs: - - * collapse_scalar: + * :class:`iris.analysis.Linear`, and + * :class:`iris.analysis.Nearest`. + collapse_scalar : bool, default=True Whether to collapse the dimension of scalar sample points in the resulting cube. Default is True. - Returns: + Returns + ------- + cube A cube interpolated at the given sample points. If `collapse_scalar` is True then the dimensionality of the cube will be the number of original cube dimensions minus the number of scalar coordinates. - For example: - + Examples + -------- >>> import datetime >>> import iris >>> path = iris.sample_data_path('uk_hires.pp') @@ -3736,8 +4984,11 @@ def interpolate(self, sample_points, scheme, collapse_scalar=True): air_potential_temperature / (K) \ (time: 3; model_level_number: 7; grid_latitude: 204; grid_longitude: 187) >>> print(cube.coord('time')) - DimCoord([2009-11-19 10:00:00, 2009-11-19 11:00:00, \ -2009-11-19 12:00:00], standard_name='time', calendar='gregorian') + DimCoord : time / (hours since 1970-01-01 00:00:00, standard calendar) + points: [2009-11-19 10:00:00, 2009-11-19 11:00:00, 2009-11-19 12:00:00] + shape: (3,) + dtype: float64 + standard_name: 'time' >>> print(cube.coord('time').points) [349618. 349619. 349620.] 
>>> samples = [('time', 349618.5)] @@ -3746,8 +4997,11 @@ def interpolate(self, sample_points, scheme, collapse_scalar=True): air_potential_temperature / (K) \ (model_level_number: 7; grid_latitude: 204; grid_longitude: 187) >>> print(result.coord('time')) - DimCoord([2009-11-19 10:30:00], standard_name='time', \ -calendar='gregorian') + DimCoord : time / (hours since 1970-01-01 00:00:00, standard calendar) + points: [2009-11-19 10:30:00] + shape: (1,) + dtype: float64 + standard_name: 'time' >>> print(result.coord('time').points) [349618.5] >>> # For datetime-like coordinates, we can also use @@ -3758,8 +5012,11 @@ def interpolate(self, sample_points, scheme, collapse_scalar=True): air_potential_temperature / (K) \ (model_level_number: 7; grid_latitude: 204; grid_longitude: 187) >>> print(result2.coord('time')) - DimCoord([2009-11-19 10:30:00], standard_name='time', \ -calendar='gregorian') + DimCoord : time / (hours since 1970-01-01 00:00:00, standard calendar) + points: [2009-11-19 10:30:00] + shape: (1,) + dtype: float64 + standard_name: 'time' >>> print(result2.coord('time').points) [349618.5] >>> print(result == result2) @@ -3767,33 +5024,44 @@ def interpolate(self, sample_points, scheme, collapse_scalar=True): """ coords, points = zip(*sample_points) - interp = scheme.interpolator(self, coords) + interp = scheme.interpolator(self, coords) # type: ignore[arg-type] return interp(points, collapse_scalar=collapse_scalar) - def regrid(self, grid, scheme): - """ + def regrid(self, grid: Cube, scheme: iris.analysis.RegriddingScheme) -> Cube: + r"""Regrid this :class:`~iris.cube.Cube` on to the given target `grid`. + Regrid this :class:`~iris.cube.Cube` on to the given target `grid` using the given regridding `scheme`. - Args: - - * grid: + Parameters + ---------- + grid : A :class:`~iris.cube.Cube` that defines the target grid. - * scheme: - The type of regridding to use to regrid this cube onto the - target grid. The regridding schemes currently available - in Iris are: + scheme : + An instance of the type of regridding to use to regrid this cube onto the + target grid. The regridding schemes in Iris currently include: + + * :class:`iris.analysis.Linear`\*, + * :class:`iris.analysis.Nearest`\*, + * :class:`iris.analysis.AreaWeighted`\*, + * :class:`iris.analysis.UnstructuredNearest`, + * :class:`iris.analysis.PointInCell`, - * :class:`iris.analysis.Linear`, - * :class:`iris.analysis.Nearest`, and - * :class:`iris.analysis.AreaWeighted`. + \* Supports lazy regridding. - Returns: + Returns + ------- + :class:`~iris.cube` A cube defined with the horizontal dimensions of the target grid and the other dimensions from this cube. The data values of this cube will be converted to values on the new grid according to the given regridding scheme. + The returned cube will have lazy data if the original cube has + lazy data and the regridding scheme supports lazy regridding. + + Notes + ----- .. note:: Both the source and target cubes must have a CoordSystem, otherwise @@ -3804,9 +5072,8 @@ def regrid(self, grid, scheme): return regridder(self) -class ClassDict(MutableMapping, object): - """ - A mapping that stores objects keyed on their superclasses and their names. +class ClassDict(MutableMapping): + """A mapping that stores objects keyed on their superclasses and their names. The mapping has a root class, all stored objects must be a subclass of the root class. The superclasses used for an object include the class of the @@ -3814,19 +5081,20 @@ class ClassDict(MutableMapping, object): any key. 
""" + def __init__(self, superclass): if not isinstance(superclass, type): - raise TypeError("The superclass must be a Python type or new " - "style class.") + raise TypeError("The superclass must be a Python type or new style class.") self._superclass = superclass self._basic_map = {} self._retrieval_map = {} def add(self, object_, replace=False): - '''Add an object to the dictionary.''' + """Add an object to the dictionary.""" if not isinstance(object_, self._superclass): msg = "Only subclasses of {!r} are allowed as values.".format( - self._superclass.__name__) + self._superclass.__name__ + ) raise TypeError(msg) # Find all the superclasses of the given object, starting with the # object's class. @@ -3838,9 +5106,11 @@ def add(self, object_, replace=False): # object. for key_class in superclasses: if key_class in self._retrieval_map: - msg = "Cannot add instance of '%s' because instance of " \ - "'%s' already added." % (type(object_).__name__, - key_class.__name__) + msg = ( + "Cannot add instance of '%s' because instance of " + "'%s' already added." + % (type(object_).__name__, key_class.__name__) + ) raise ValueError(msg) # Register the given object against those superclasses. for key_class in superclasses: @@ -3852,14 +5122,14 @@ def __getitem__(self, class_): try: return self._retrieval_map[class_] except KeyError: - raise KeyError('Coordinate system %r does not exist.' % class_) + raise KeyError("Coordinate system %r does not exist." % class_) def __setitem__(self, key, value): - raise NotImplementedError('You must call the add method instead.') + raise NotImplementedError("You must call the add method instead.") def __delitem__(self, class_): cs = self[class_] - keys = [k for k, v in six.iteritems(self._retrieval_map) if v == cs] + keys = [k for k, v in self._retrieval_map.items() if v == cs] for key in keys: del self._retrieval_map[key] del self._basic_map[type(cs)] @@ -3873,20 +5143,21 @@ def __iter__(self): yield item def keys(self): - '''Return the keys of the dictionary mapping.''' + """Return the keys of the dictionary mapping.""" return self._basic_map.keys() def sorted_axes(axes): - """ - Returns the axis names sorted alphabetically, with the exception that + """Return the axis names sorted alphabetically. + + Return the axis names sorted alphabetically, with the exception that 't', 'z', 'y', and, 'x' are sorted to the end. """ - return sorted(axes, key=lambda name: ({'x': 4, - 'y': 3, - 'z': 2, - 't': 1}.get(name, 0), name)) + return sorted( + axes, + key=lambda name: ({"x": 4, "y": 3, "z": 2, "t": 1}.get(name, 0), name), + ) # See Cube.slice() for the definition/context. diff --git a/lib/iris/etc/palette/diverging/BrBG_11.txt b/lib/iris/etc/palette/diverging/BrBG_11.txt index 7243c178ae..bf1c566f81 100644 --- a/lib/iris/etc/palette/diverging/BrBG_11.txt +++ b/lib/iris/etc/palette/diverging/BrBG_11.txt @@ -5,7 +5,7 @@ # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed # under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR @@ -16,7 +16,7 @@ # scheme: diverging # keyword: anomaly # interpolate: off -# source: http://colorbrewer.org/ +# source: https://colorbrewer.org/ # type: RGB # 0.329412 0.188235 0.019608 diff --git a/lib/iris/etc/palette/diverging/PRGn_11.txt b/lib/iris/etc/palette/diverging/PRGn_11.txt index 32fdee2871..e78d56e04d 100644 --- a/lib/iris/etc/palette/diverging/PRGn_11.txt +++ b/lib/iris/etc/palette/diverging/PRGn_11.txt @@ -5,7 +5,7 @@ # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed # under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR @@ -16,7 +16,7 @@ # scheme: diverging # keyword: anomaly # interpolate: off -# source: http://colorbrewer.org/ +# source: https://colorbrewer.org/ # type: RGB # 0.250980 0.000000 0.294118 diff --git a/lib/iris/etc/palette/diverging/PiYG_11.txt b/lib/iris/etc/palette/diverging/PiYG_11.txt index b52c50acbc..826c48e361 100644 --- a/lib/iris/etc/palette/diverging/PiYG_11.txt +++ b/lib/iris/etc/palette/diverging/PiYG_11.txt @@ -5,7 +5,7 @@ # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed # under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR @@ -16,7 +16,7 @@ # scheme: diverging # keyword: anomaly # interpolate: off -# source: http://colorbrewer.org/ +# source: https://colorbrewer.org/ # type: RGB # 0.556863 0.003922 0.321569 diff --git a/lib/iris/etc/palette/diverging/PuOr_11.txt b/lib/iris/etc/palette/diverging/PuOr_11.txt index 8e9c32b8d0..4785c4b536 100644 --- a/lib/iris/etc/palette/diverging/PuOr_11.txt +++ b/lib/iris/etc/palette/diverging/PuOr_11.txt @@ -5,7 +5,7 @@ # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed # under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR @@ -17,7 +17,7 @@ # std_name: air_pressure_at_sea_level # keyword: anomaly # interpolate: off -# source: http://colorbrewer.org/ +# source: https://colorbrewer.org/ # type: RGB # 0.176471 0.000000 0.294118 diff --git a/lib/iris/etc/palette/diverging/RdBu_11.txt b/lib/iris/etc/palette/diverging/RdBu_11.txt index 526132e2a0..f7da164953 100644 --- a/lib/iris/etc/palette/diverging/RdBu_11.txt +++ b/lib/iris/etc/palette/diverging/RdBu_11.txt @@ -5,7 +5,7 @@ # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed # under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR @@ -16,7 +16,7 @@ # scheme: diverging # keyword: anomaly, temperature # interpolate: off -# source: http://colorbrewer.org/ +# source: https://colorbrewer.org/ # type: RGB # 0.019608 0.188235 0.380392 diff --git a/lib/iris/etc/palette/diverging/RdGy_11.txt b/lib/iris/etc/palette/diverging/RdGy_11.txt index c8ade7f388..0b8ae55480 100644 --- a/lib/iris/etc/palette/diverging/RdGy_11.txt +++ b/lib/iris/etc/palette/diverging/RdGy_11.txt @@ -5,7 +5,7 @@ # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed # under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR @@ -16,7 +16,7 @@ # scheme: diverging # keyword: anomaly # interpolate: off -# source: http://colorbrewer.org/ +# source: https://colorbrewer.org/ # type: RGB # 0.403922 0.000000 0.121569 diff --git a/lib/iris/etc/palette/diverging/RdYlBu_11.txt b/lib/iris/etc/palette/diverging/RdYlBu_11.txt index 84cc3dd2c2..5d799e8e77 100644 --- a/lib/iris/etc/palette/diverging/RdYlBu_11.txt +++ b/lib/iris/etc/palette/diverging/RdYlBu_11.txt @@ -5,7 +5,7 @@ # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed # under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR @@ -16,7 +16,7 @@ # scheme: diverging # keyword: anomaly # interpolate: off -# source: http://colorbrewer.org/ +# source: https://colorbrewer.org/ # type: RGB # 0.647059 0.000000 0.149020 diff --git a/lib/iris/etc/palette/diverging/RdYlGn_11.txt b/lib/iris/etc/palette/diverging/RdYlGn_11.txt index f1d626d493..d17ff39177 100644 --- a/lib/iris/etc/palette/diverging/RdYlGn_11.txt +++ b/lib/iris/etc/palette/diverging/RdYlGn_11.txt @@ -5,7 +5,7 @@ # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed # under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR @@ -16,7 +16,7 @@ # scheme: diverging # keyword: anomaly # interpolate: off -# source: http://colorbrewer.org/ +# source: https://colorbrewer.org/ # type: RGB # 0.647059 0.000000 0.149020 diff --git a/lib/iris/etc/palette/diverging/Spectral_11.txt b/lib/iris/etc/palette/diverging/Spectral_11.txt index 03eaa98f7f..f2a4447846 100644 --- a/lib/iris/etc/palette/diverging/Spectral_11.txt +++ b/lib/iris/etc/palette/diverging/Spectral_11.txt @@ -5,7 +5,7 @@ # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed # under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR @@ -16,7 +16,7 @@ # scheme: diverging # keyword: anomaly # interpolate: off -# source: http://colorbrewer.org/ +# source: https://colorbrewer.org/ # type: RGB # 0.619608 0.003922 0.258824 diff --git a/lib/iris/etc/palette/qualitative/Accent_08.txt b/lib/iris/etc/palette/qualitative/Accent_08.txt index 9b1f3c49c3..bb13fcb5a9 100644 --- a/lib/iris/etc/palette/qualitative/Accent_08.txt +++ b/lib/iris/etc/palette/qualitative/Accent_08.txt @@ -5,7 +5,7 @@ # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed # under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR @@ -15,7 +15,7 @@ # name: Accent_08 # scheme: qualitative # interpolate: off -# source: http://colorbrewer.org/ +# source: https://colorbrewer.org/ # type: RGB # 0.498039 0.788235 0.498039 diff --git a/lib/iris/etc/palette/qualitative/Dark2_08.txt b/lib/iris/etc/palette/qualitative/Dark2_08.txt index bd33878e10..5d1599a31b 100644 --- a/lib/iris/etc/palette/qualitative/Dark2_08.txt +++ b/lib/iris/etc/palette/qualitative/Dark2_08.txt @@ -5,7 +5,7 @@ # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed # under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR @@ -15,7 +15,7 @@ # name: Dark2_08 # scheme: qualitative # interpolate: off -# source: http://colorbrewer.org/ +# source: https://colorbrewer.org/ # type: RGB # 0.105882 0.619608 0.466667 diff --git a/lib/iris/etc/palette/qualitative/Paired_12.txt b/lib/iris/etc/palette/qualitative/Paired_12.txt index b4efea1c92..8b8154ff3b 100644 --- a/lib/iris/etc/palette/qualitative/Paired_12.txt +++ b/lib/iris/etc/palette/qualitative/Paired_12.txt @@ -5,7 +5,7 @@ # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed # under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR @@ -15,7 +15,7 @@ # name: Paired_12 # scheme: qualitative # interpolate: off -# source: http://colorbrewer.org/ +# source: https://colorbrewer.org/ # type: RGB # 0.650980 0.807843 0.890196 diff --git a/lib/iris/etc/palette/qualitative/Pastel1_09.txt b/lib/iris/etc/palette/qualitative/Pastel1_09.txt index fb6ef6d0a8..042009995e 100644 --- a/lib/iris/etc/palette/qualitative/Pastel1_09.txt +++ b/lib/iris/etc/palette/qualitative/Pastel1_09.txt @@ -5,7 +5,7 @@ # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed # under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR @@ -15,7 +15,7 @@ # name: Pastel1_09 # scheme: qualitative # interpolate: off -# source: http://colorbrewer.org/ +# source: https://colorbrewer.org/ # type: RGB # 0.984314 0.705882 0.682353 diff --git a/lib/iris/etc/palette/qualitative/Pastel2_08.txt b/lib/iris/etc/palette/qualitative/Pastel2_08.txt index 1fda519549..05b30fef0b 100644 --- a/lib/iris/etc/palette/qualitative/Pastel2_08.txt +++ b/lib/iris/etc/palette/qualitative/Pastel2_08.txt @@ -5,7 +5,7 @@ # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed # under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR @@ -15,7 +15,7 @@ # name: Pastel2_08 # scheme: qualitative # interpolate: off -# source: http://colorbrewer.org/ +# source: https://colorbrewer.org/ # type: RGB # 0.701961 0.886275 0.803922 diff --git a/lib/iris/etc/palette/qualitative/Set1_09.txt b/lib/iris/etc/palette/qualitative/Set1_09.txt index 3dd2145930..a2f9f57c4b 100644 --- a/lib/iris/etc/palette/qualitative/Set1_09.txt +++ b/lib/iris/etc/palette/qualitative/Set1_09.txt @@ -5,7 +5,7 @@ # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed # under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR @@ -15,7 +15,7 @@ # name: Set1_09 # scheme: qualitative # interpolate: off -# source: http://colorbrewer.org/ +# source: https://colorbrewer.org/ # type: RGB # 0.894118 0.101961 0.109804 diff --git a/lib/iris/etc/palette/qualitative/Set2_08.txt b/lib/iris/etc/palette/qualitative/Set2_08.txt index a643828a22..40cfa0d738 100644 --- a/lib/iris/etc/palette/qualitative/Set2_08.txt +++ b/lib/iris/etc/palette/qualitative/Set2_08.txt @@ -5,7 +5,7 @@ # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed # under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR @@ -15,7 +15,7 @@ # name: Set2_08 # scheme: qualitative # interpolate: off -# source: http://colorbrewer.org/ +# source: https://colorbrewer.org/ # type: RGB # 0.400000 0.760784 0.647059 diff --git a/lib/iris/etc/palette/qualitative/Set3_12.txt b/lib/iris/etc/palette/qualitative/Set3_12.txt index 589352fc60..33bdaf372c 100644 --- a/lib/iris/etc/palette/qualitative/Set3_12.txt +++ b/lib/iris/etc/palette/qualitative/Set3_12.txt @@ -5,7 +5,7 @@ # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed # under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR @@ -15,7 +15,7 @@ # name: Set3_12 # scheme: qualitative # interpolate: off -# source: http://colorbrewer.org/ +# source: https://colorbrewer.org/ # type: RGB # 0.552941 0.827451 0.780392 diff --git a/lib/iris/etc/palette/sequential/Blues_09.txt b/lib/iris/etc/palette/sequential/Blues_09.txt index 37c7e6082c..d489a46d61 100644 --- a/lib/iris/etc/palette/sequential/Blues_09.txt +++ b/lib/iris/etc/palette/sequential/Blues_09.txt @@ -5,7 +5,7 @@ # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed # under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR @@ -16,7 +16,7 @@ # scheme: sequential # keyword: lwe_precipitation, convective_precipitation, stratiform_precipitation, precipitation_amount # interpolate: off -# source: http://colorbrewer.org/ +# source: https://colorbrewer.org/ # type: RGB # 0.968627 0.984314 1.000000 diff --git a/lib/iris/etc/palette/sequential/BuGn_09.txt b/lib/iris/etc/palette/sequential/BuGn_09.txt index 28b5bfc11f..cde85b422c 100644 --- a/lib/iris/etc/palette/sequential/BuGn_09.txt +++ b/lib/iris/etc/palette/sequential/BuGn_09.txt @@ -5,7 +5,7 @@ # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed # under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR @@ -15,7 +15,7 @@ # name: BuGn_09 # scheme: sequential # interpolate: off -# source: http://colorbrewer.org/ +# source: https://colorbrewer.org/ # type: RGB # 0.968627 0.988235 0.992157 diff --git a/lib/iris/etc/palette/sequential/BuPu_09.txt b/lib/iris/etc/palette/sequential/BuPu_09.txt index 6e0596ec80..99fafcc208 100644 --- a/lib/iris/etc/palette/sequential/BuPu_09.txt +++ b/lib/iris/etc/palette/sequential/BuPu_09.txt @@ -5,7 +5,7 @@ # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed # under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR @@ -15,7 +15,7 @@ # name: BuPu_09 # scheme: sequential # interpolate: off -# source: http://colorbrewer.org/ +# source: https://colorbrewer.org/ # type: RGB # 0.968627 0.988235 0.992157 diff --git a/lib/iris/etc/palette/sequential/GnBu_09.txt b/lib/iris/etc/palette/sequential/GnBu_09.txt index 0225f496e7..cb3fa28c4a 100644 --- a/lib/iris/etc/palette/sequential/GnBu_09.txt +++ b/lib/iris/etc/palette/sequential/GnBu_09.txt @@ -5,7 +5,7 @@ # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed # under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR @@ -15,7 +15,7 @@ # name: GnBu_09 # scheme: sequential # interpolate: off -# source: http://colorbrewer.org/ +# source: https://colorbrewer.org/ # type: RGB # 0.968627 0.988235 0.941176 diff --git a/lib/iris/etc/palette/sequential/Greens_09.txt b/lib/iris/etc/palette/sequential/Greens_09.txt index 8900459b58..e338e6b9b0 100644 --- a/lib/iris/etc/palette/sequential/Greens_09.txt +++ b/lib/iris/etc/palette/sequential/Greens_09.txt @@ -5,7 +5,7 @@ # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed # under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR @@ -15,7 +15,7 @@ # name: Greens_09 # scheme: sequential # interpolate: off -# source: http://colorbrewer.org/ +# source: https://colorbrewer.org/ # type: RGB # 0.968627 0.988235 0.960784 diff --git a/lib/iris/etc/palette/sequential/Greys_09.txt b/lib/iris/etc/palette/sequential/Greys_09.txt index d76ec0e6a1..b8d047938f 100644 --- a/lib/iris/etc/palette/sequential/Greys_09.txt +++ b/lib/iris/etc/palette/sequential/Greys_09.txt @@ -5,7 +5,7 @@ # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed # under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR @@ -15,7 +15,7 @@ # name: Greys_09 # scheme: sequential # interpolate: off -# source: http://colorbrewer.org/ +# source: https://colorbrewer.org/ # type: RGB # 1.000000 1.000000 1.000000 diff --git a/lib/iris/etc/palette/sequential/OrRd_09.txt b/lib/iris/etc/palette/sequential/OrRd_09.txt index 3e081719a7..3da55efc6f 100644 --- a/lib/iris/etc/palette/sequential/OrRd_09.txt +++ b/lib/iris/etc/palette/sequential/OrRd_09.txt @@ -5,7 +5,7 @@ # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed # under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR @@ -15,7 +15,7 @@ # name: OrRd_09 # scheme: sequential # interpolate: off -# source: http://colorbrewer.org/ +# source: https://colorbrewer.org/ # type: RGB # 1.000000 0.968627 0.925490 diff --git a/lib/iris/etc/palette/sequential/Oranges_09.txt b/lib/iris/etc/palette/sequential/Oranges_09.txt index 022be59e99..d5793340aa 100644 --- a/lib/iris/etc/palette/sequential/Oranges_09.txt +++ b/lib/iris/etc/palette/sequential/Oranges_09.txt @@ -5,7 +5,7 @@ # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed # under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR @@ -15,7 +15,7 @@ # name: Oranges_09 # scheme: sequential # interpolate: off -# source: http://colorbrewer.org/ +# source: https://colorbrewer.org/ # type: RGB # 1.000000 0.960784 0.921569 diff --git a/lib/iris/etc/palette/sequential/PuBuGn_09.txt b/lib/iris/etc/palette/sequential/PuBuGn_09.txt index 264289cdba..38740edcf5 100644 --- a/lib/iris/etc/palette/sequential/PuBuGn_09.txt +++ b/lib/iris/etc/palette/sequential/PuBuGn_09.txt @@ -5,7 +5,7 @@ # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed # under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR @@ -15,7 +15,7 @@ # name: PuBuGn_09 # scheme: sequential # interpolate: off -# source: http://colorbrewer.org/ +# source: https://colorbrewer.org/ # type: RGB # 1.000000 0.968627 0.984314 diff --git a/lib/iris/etc/palette/sequential/PuBu_09.txt b/lib/iris/etc/palette/sequential/PuBu_09.txt index e07b960f0b..44f6c6f01b 100644 --- a/lib/iris/etc/palette/sequential/PuBu_09.txt +++ b/lib/iris/etc/palette/sequential/PuBu_09.txt @@ -5,7 +5,7 @@ # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed # under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR @@ -15,7 +15,7 @@ # name: PuBu_09 # scheme: sequential # interpolate: off -# source: http://colorbrewer.org/ +# source: https://colorbrewer.org/ # type: RGB # 1.000000 0.968627 0.984314 diff --git a/lib/iris/etc/palette/sequential/PuRd_09.txt b/lib/iris/etc/palette/sequential/PuRd_09.txt index 19589fb4ad..402584291a 100644 --- a/lib/iris/etc/palette/sequential/PuRd_09.txt +++ b/lib/iris/etc/palette/sequential/PuRd_09.txt @@ -5,7 +5,7 @@ # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed # under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR @@ -15,7 +15,7 @@ # name: PuRd_09 # scheme: sequential # interpolate: off -# source: http://colorbrewer.org/ +# source: https://colorbrewer.org/ # type: RGB # 0.968627 0.956863 0.976471 diff --git a/lib/iris/etc/palette/sequential/Purples_09.txt b/lib/iris/etc/palette/sequential/Purples_09.txt index 338dd3d021..1b5811271c 100644 --- a/lib/iris/etc/palette/sequential/Purples_09.txt +++ b/lib/iris/etc/palette/sequential/Purples_09.txt @@ -5,7 +5,7 @@ # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed # under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR @@ -15,7 +15,7 @@ # name: Purples_09 # scheme: sequential # interpolate: off -# source: http://colorbrewer.org/ +# source: https://colorbrewer.org/ # type: RGB # 0.988235 0.984314 0.992157 diff --git a/lib/iris/etc/palette/sequential/RdPu_09.txt b/lib/iris/etc/palette/sequential/RdPu_09.txt index a21e4b9000..71054bf397 100644 --- a/lib/iris/etc/palette/sequential/RdPu_09.txt +++ b/lib/iris/etc/palette/sequential/RdPu_09.txt @@ -5,7 +5,7 @@ # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed # under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR @@ -15,7 +15,7 @@ # name: RdPu_09 # scheme: sequential # interpolate: off -# source: http://colorbrewer.org/ +# source: https://colorbrewer.org/ # type: RGB # 1.000000 0.968627 0.952941 diff --git a/lib/iris/etc/palette/sequential/Reds_09.txt b/lib/iris/etc/palette/sequential/Reds_09.txt index d03594e058..445001df64 100644 --- a/lib/iris/etc/palette/sequential/Reds_09.txt +++ b/lib/iris/etc/palette/sequential/Reds_09.txt @@ -5,7 +5,7 @@ # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed # under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR @@ -15,7 +15,7 @@ # name: Reds_09 # scheme: sequential # interpolate: off -# source: http://colorbrewer.org/ +# source: https://colorbrewer.org/ # type: RGB # 1.000000 0.960784 0.941176 diff --git a/lib/iris/etc/palette/sequential/YlGnBu_09.txt b/lib/iris/etc/palette/sequential/YlGnBu_09.txt index 733d1aebca..e1699f1f5b 100644 --- a/lib/iris/etc/palette/sequential/YlGnBu_09.txt +++ b/lib/iris/etc/palette/sequential/YlGnBu_09.txt @@ -5,7 +5,7 @@ # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed # under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR @@ -15,7 +15,7 @@ # name: YlGnBu_09 # scheme: sequential # interpolate: off -# source: http://colorbrewer.org/ +# source: https://colorbrewer.org/ # type: RGB # 1.000000 1.000000 0.850980 diff --git a/lib/iris/etc/palette/sequential/YlGn_09.txt b/lib/iris/etc/palette/sequential/YlGn_09.txt index 1242b90d59..efbecd5ea7 100644 --- a/lib/iris/etc/palette/sequential/YlGn_09.txt +++ b/lib/iris/etc/palette/sequential/YlGn_09.txt @@ -5,7 +5,7 @@ # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed # under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR @@ -15,7 +15,7 @@ # name: YlGn_09 # scheme: sequential # interpolate: off -# source: http://colorbrewer.org/ +# source: https://colorbrewer.org/ # type: RGB # 1.000000 1.000000 0.898039 diff --git a/lib/iris/etc/palette/sequential/YlOrBr_09.txt b/lib/iris/etc/palette/sequential/YlOrBr_09.txt index f6d93c5b9a..caf8886f83 100644 --- a/lib/iris/etc/palette/sequential/YlOrBr_09.txt +++ b/lib/iris/etc/palette/sequential/YlOrBr_09.txt @@ -5,7 +5,7 @@ # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed # under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR @@ -15,7 +15,7 @@ # name: YlOrBr_09 # scheme: sequential # interpolate: off -# source: http://colorbrewer.org/ +# source: https://colorbrewer.org/ # type: RGB # 1.000000 1.000000 0.898039 diff --git a/lib/iris/etc/palette/sequential/YlOrRd_09.txt b/lib/iris/etc/palette/sequential/YlOrRd_09.txt index b252f6f684..4d50f5034d 100644 --- a/lib/iris/etc/palette/sequential/YlOrRd_09.txt +++ b/lib/iris/etc/palette/sequential/YlOrRd_09.txt @@ -5,7 +5,7 @@ # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed # under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR @@ -15,7 +15,7 @@ # name: YlOrRd_09 # scheme: sequential # interpolate: off -# source: http://colorbrewer.org/ +# source: https://colorbrewer.org/ # type: RGB # 1.000000 1.000000 0.800000 diff --git a/lib/iris/exceptions.py b/lib/iris/exceptions.py index dbc3f523d2..d6d2084d3c 100644 --- a/lib/iris/exceptions.py +++ b/lib/iris/exceptions.py @@ -1,53 +1,53 @@ -# (C) British Crown Copyright 2010 - 2017, Met Office +# Copyright Iris contributors # -# This file is part of Iris. 
-# -# Iris is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the -# Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Iris is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with Iris. If not, see . -""" -Exceptions specific to the Iris package. - -""" - -from __future__ import (absolute_import, division, print_function) -from six.moves import (filter, input, map, range, zip) # noqa +# This file is part of Iris and is released under the BSD license. +# See LICENSE in the root of the repository for full licensing details. +"""Exceptions specific to the Iris package.""" class IrisError(Exception): """Base class for errors in the Iris package.""" + pass class CoordinateCollapseError(IrisError): """Raised when a requested coordinate cannot be collapsed.""" + pass class CoordinateNotFoundError(KeyError): """Raised when a search yields no coordinates.""" + pass class CellMeasureNotFoundError(KeyError): """Raised when a search yields no cell measures.""" + + pass + + +class AncillaryVariableNotFoundError(KeyError): + """Raised when a search yields no ancillary variables.""" + + pass + + +class ConnectivityNotFoundError(KeyError): + """Raised when a search yields no connectivities.""" + pass class CoordinateMultiDimError(ValueError): """Raised when a routine doesn't support multi-dimensional coordinates.""" + def __init__(self, msg): # N.B. deferred import to avoid a circular import dependency. import iris.coords + if isinstance(msg, iris.coords.Coord): fmt = "Multi-dimensional coordinate not supported: '%s'" msg = fmt % msg.name() @@ -56,96 +56,93 @@ def __init__(self, msg): class CoordinateNotRegularError(ValueError): """Raised when a coordinate is unexpectedly irregular.""" + pass class InvalidCubeError(IrisError): """Raised when a Cube validation check fails.""" + pass class ConstraintMismatchError(IrisError): - """ - Raised when a constraint operation has failed to find the correct number - of results. + """Raised when a constraint operation has failed to find the correct number of results.""" - """ pass class NotYetImplementedError(IrisError): - """ - Raised by missing functionality. + """Raised by missing functionality. Different meaning to NotImplementedError, which is for abstract methods. """ + pass class TranslationError(IrisError): """Raised when Iris is unable to translate format-specific codes.""" + pass class IgnoreCubeException(IrisError): - """ - Raised from a callback function when a cube should be ignored on load. + """Raised from a callback function when a cube should be ignored on load.""" - """ pass class ConcatenateError(IrisError): - """ - Raised when concatenate is expected to produce a single cube, but fails to - do so. + """Raised when concatenate is expected to produce a single cube, but fails to do so.""" - """ def __init__(self, differences): - """ - Creates a ConcatenateError with a list of textual descriptions of - the differences which prevented a concatenate. + """Create a ConcatenateError with a list of textual descriptions of differences. 
- Args: + Create a ConcatenateError with a list of textual descriptions of + the differences which prevented a concatenate. - * differences: + Parameters + ---------- + differences : list of str The list of strings which describe the differences. """ self.differences = differences def __str__(self): - return '\n '.join(['failed to concatenate into a single cube.'] + - list(self.differences)) + return "\n ".join( + ["failed to concatenate into a single cube."] + list(self.differences) + ) class MergeError(IrisError): - """ - Raised when merge is expected to produce a single cube, but fails to - do so. + """Raised when merge is expected to produce a single cube, but fails to do so.""" - """ def __init__(self, differences): - """ + """Create a MergeError with a list of textual descriptions of the differences. + Creates a MergeError with a list of textual descriptions of the differences which prevented a merge. - Args: - - * differences: + Parameters + ---------- + differences : list of str The list of strings which describe the differences. """ self.differences = differences def __str__(self): - return '\n '.join(['failed to merge into a single cube.'] + - list(self.differences)) + return "\n ".join( + ["failed to merge into a single cube."] + list(self.differences) + ) class DuplicateDataError(MergeError): """Raised when merging two or more cubes that have identical metadata.""" + def __init__(self, msg): self.differences = [msg] @@ -156,4 +153,11 @@ class LazyAggregatorError(Exception): class UnitConversionError(IrisError): """Raised when Iris is unable to convert a unit.""" + + pass + + +class CannotAddError(ValueError): + """Raised when an object (e.g. coord) cannot be added to a :class:`~iris.cube.Cube`.""" + pass diff --git a/lib/iris/experimental/__init__.py b/lib/iris/experimental/__init__.py index eb29818f80..eea4259355 100644 --- a/lib/iris/experimental/__init__.py +++ b/lib/iris/experimental/__init__.py @@ -1,26 +1,10 @@ -# (C) British Crown Copyright 2010 - 2015, Met Office +# Copyright Iris contributors # -# This file is part of Iris. -# -# Iris is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the -# Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Iris is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with Iris. If not, see . -""" -Experimental code can be introduced to Iris through this package. +# This file is part of Iris and is released under the BSD license. +# See LICENSE in the root of the repository for full licensing details. +"""Experimental code can be introduced to Iris through this package. Changes to experimental code may be more extensive than in the rest of the codebase. The code is expected to graduate, eventually, to "full status". 
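A brief aside on the exceptions rework above: `ConcatenateError` and `MergeError` both join their `differences` list in `__str__`, so callers get a readable multi-line report. A minimal sketch, where `cubes` is a hypothetical CubeList with clashing metadata and the indented output line is illustrative:

    >>> from iris.exceptions import MergeError
    >>> try:
    ...     cubes.merge_cube()
    ... except MergeError as err:
    ...     print(err)
    failed to merge into a single cube.
      cube.attributes keys differ: 'history'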
""" - -from __future__ import (absolute_import, division, print_function) -from six.moves import (filter, input, map, range, zip) # noqa diff --git a/lib/iris/experimental/animate.py b/lib/iris/experimental/animate.py index ac60400ba7..13c1613802 100644 --- a/lib/iris/experimental/animate.py +++ b/lib/iris/experimental/animate.py @@ -1,132 +1,34 @@ -# (C) British Crown Copyright 2013 - 2017, Met Office +# Copyright Iris contributors # -# This file is part of Iris. -# -# Iris is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the -# Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Iris is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with Iris. If not, see . -""" -Wrapper for animating iris cubes using iris or matplotlib plotting functions - -""" - -from __future__ import (absolute_import, division, print_function) -from six.moves import (filter, input, map, range, zip) # noqa +# This file is part of Iris and is released under the BSD license. +# See LICENSE in the root of the repository for full licensing details. +"""Wrapper for animating iris cubes using iris or matplotlib plotting functions. -import warnings +Notes +----- +.. deprecated:: 3.4.0 -import matplotlib.pyplot as plt -import matplotlib.animation as animation + ``iris.experimental.animate.animate()`` has been moved to + :func:`iris.plot.animate`. This module will therefore be removed in a future + release. -import iris +""" def animate(cube_iterator, plot_func, fig=None, **kwargs): - """ - Animates the given cube iterator. - - Args: - - * cube_iterator (iterable of :class:`iris.cube.Cube` objects): - Each animation frame corresponds to each :class:`iris.cube.Cube` - object. See :meth:`iris.cube.Cube.slices`. - - * plot_func (:mod:`iris.plot` or :mod:`iris.quickplot` plotting function): - Plotting function used to animate. Must accept the signature - ``plot_func(cube, vmin=vmin, vmax=vmax, coords=coords)``. - :func:`~iris.plot.contourf`, :func:`~iris.plot.contour`, - :func:`~iris.plot.pcolor` and :func:`~iris.plot.pcolormesh` - all conform to this signature. - - Kwargs: - - * fig (:class:`matplotlib.figure.Figure` instance): - By default, the current figure will be used or a new figure instance - created if no figure is available. See :func:`matplotlib.pyplot.gcf`. + """Animate the given cube iterator. - * coords (list of :class:`~iris.coords.Coord` objects or coordinate names): - Use the given coordinates as the axes for the plot. The order of the - given coordinates indicates which axis to use for each, where the first - element is the horizontal axis of the plot and the second element is - the vertical axis of the plot. + Warnings + -------- + This function is now **disabled**. - * interval (int, float or long): - Defines the time interval in milliseconds between successive frames. - A default interval of 100ms is set. - - * vmin, vmax (int, float or long): - Color scaling values, see :class:`matplotlib.colors.Normalize` for - further details. Default values are determined by the min-max across - the data set over the entire sequence. 
- - See :class:`matplotlib.animation.FuncAnimation` for details of other valid - keyword arguments. - - Returns: - :class:`~matplotlib.animation.FuncAnimation` object suitable for - saving and or plotting. - - For example, to animate along a set of cube slices:: - - cube_iter = cubes.slices(('grid_longitude', 'grid_latitude')) - ani = animate(cube_iter, qplt.contourf) - plt.show() + The functionality has been moved to :func:`iris.plot.animate`. """ - kwargs.setdefault('interval', 100) - coords = kwargs.pop('coords', None) - - if fig is None: - fig = plt.gcf() - - def update_animation_iris(i, cubes, vmin, vmax, coords): - # Clearing the figure is currently necessary for compatibility with - # the iris quickploting module - due to the colorbar. - plt.gcf().clf() - plot_func(cubes[i], vmin=vmin, vmax=vmax, coords=coords) - - # Turn cube iterator into a list to determine plot ranges. - # NOTE: we check that we are not providing a cube as this has a deprecated - # iter special method. - if (hasattr(cube_iterator, '__iter__') and not - isinstance(cube_iterator, iris.cube.Cube)): - cubes = iris.cube.CubeList(cube_iterator) - else: - msg = 'iterable type object required for animation, {} given'.format( - type(cube_iterator)) - raise TypeError(msg) - - supported = ['iris.plot', 'iris.quickplot'] - if plot_func.__module__ not in supported: - msg = ('Given plotting module "{}" may not be supported, intended ' - 'use: {}.') - msg = msg.format(plot_func.__module__, supported) - warnings.warn(msg, UserWarning) - - supported = ['contour', 'contourf', 'pcolor', 'pcolormesh'] - if plot_func.__name__ not in supported: - msg = ('Given plotting function "{}" may not be supported, intended ' - 'use: {}.') - msg = msg.format(plot_func.__name__, supported) - warnings.warn(msg, UserWarning) - - # Determine plot range. - vmin = kwargs.pop('vmin', min([cc.data.min() for cc in cubes])) - vmax = kwargs.pop('vmax', max([cc.data.max() for cc in cubes])) - - update = update_animation_iris - frames = range(len(cubes)) - - return animation.FuncAnimation(fig, update, - frames=frames, - fargs=(cubes, vmin, vmax, coords), - **kwargs) + msg = ( + "The function 'iris.experimental.animate.animate()' has been moved, " + "and is now at 'iris.plot.animate()'.\n" + "Please replace 'iris.experimental.animate.animate' with " + "'iris.plot.animate'." + ) + raise Exception(msg) diff --git a/lib/iris/experimental/concatenate.py b/lib/iris/experimental/concatenate.py deleted file mode 100644 index dcf3fcdd22..0000000000 --- a/lib/iris/experimental/concatenate.py +++ /dev/null @@ -1,48 +0,0 @@ -# (C) British Crown Copyright 2013 - 2015, Met Office -# -# This file is part of Iris. -# -# Iris is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the -# Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Iris is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with Iris. If not, see . -""" -Automatic concatenation of multiple cubes over one or more existing dimensions. - -.. warning:: - - This functionality has now been moved to - :meth:`iris.cube.CubeList.concatenate`. 
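Both removals in this region direct callers to stable replacements, as their messages state. A short migration sketch (`cube` is a hypothetical Cube, `cubes` a hypothetical CubeList):

    >>> import iris.plot as iplt
    >>> import iris.quickplot as qplt
    >>> ani = iplt.animate(cube.slices(('grid_longitude', 'grid_latitude')), qplt.contourf)
    >>> joined = cubes.concatenate()  # replaces experimental.concatenate.concatenate()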
- -""" - -from __future__ import (absolute_import, division, print_function) -from six.moves import (filter, input, map, range, zip) # noqa - - -def concatenate(cubes): - """ - Concatenate the provided cubes over common existing dimensions. - - .. warning:: - - This function is now **disabled**. - - The functionality has been moved to - :meth:`iris.cube.CubeList.concatenate`. - - """ - raise Exception( - 'The function "iris.experimental.concatenate.concatenate" has been ' - 'moved, and is now a CubeList instance method.' - '\nPlease replace ' - '"iris.experimental.concatenate.concatenate()" with ' - '"iris.cube.CubeList().concatenate()".') diff --git a/lib/iris/experimental/equalise_cubes.py b/lib/iris/experimental/equalise_cubes.py deleted file mode 100644 index 8a85cded97..0000000000 --- a/lib/iris/experimental/equalise_cubes.py +++ /dev/null @@ -1,55 +0,0 @@ -# (C) British Crown Copyright 2013 - 2016, Met Office -# -# This file is part of Iris. -# -# Iris is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the -# Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Iris is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with Iris. If not, see . -""" -Experimental cube-adjusting functions to assist merge operations. - -""" - -from __future__ import (absolute_import, division, print_function) -from six.moves import (filter, input, map, range, zip) # noqa - -import numpy as np - - -def equalise_attributes(cubes): - """ - Delete cube attributes that are not identical over all cubes in a group. - - This function simply deletes any attributes which are not the same for - all the given cubes. The cubes will then have identical attributes. The - given cubes are modified in-place. - - Args: - - * cubes (iterable of :class:`iris.cube.Cube`): - A collection of cubes to compare and adjust. - - """ - # Work out which attributes are identical across all the cubes. - common_keys = list(cubes[0].attributes.keys()) - for cube in cubes[1:]: - cube_keys = list(cube.attributes.keys()) - common_keys = [ - key for key in common_keys - if (key in cube_keys and - np.all(cube.attributes[key] == cubes[0].attributes[key]))] - - # Remove all the other attributes. - for cube in cubes: - for key in list(cube.attributes.keys()): - if key not in common_keys: - del cube.attributes[key] diff --git a/lib/iris/experimental/geovista.py b/lib/iris/experimental/geovista.py new file mode 100644 index 0000000000..57cbded2c2 --- /dev/null +++ b/lib/iris/experimental/geovista.py @@ -0,0 +1,335 @@ +# Copyright Iris contributors +# +# This file is part of Iris and is released under the BSD license. +# See LICENSE in the root of the repository for full licensing details. 
+"""Experimental module for using some GeoVista operations with Iris cubes.""" + +from geovista import Transform +from geovista.common import VTK_CELL_IDS, VTK_POINT_IDS + +from iris.exceptions import CoordinateNotFoundError +from iris.mesh import MeshXY + + +def _get_coord(cube, axis): + """Get the axis coordinates from the cube.""" + try: + coord = cube.coord(axis=axis, dim_coords=True) + except CoordinateNotFoundError: + coord = cube.coord(axis=axis) + return coord + + +def cube_to_polydata(cube, **kwargs): + r"""Create a :class:`pyvista.PolyData` object from a :class:`~iris.cube.Cube`. + + The resulting :class:`~pyvista.PolyData` object can be plotted using + a :class:`geovista.geoplotter.GeoPlotter`. + + Uses :class:`geovista.bridge.Transform` to parse the cube's information - one + of: :meth:`~geovista.bridge.Transform.from_1d` / + :meth:`~geovista.bridge.Transform.from_2d` / + :meth:`~geovista.bridge.Transform.from_unstructured`. + + Parameters + ---------- + cube : :class:`~iris.cube.Cube` + The Cube containing the spatial information and data for creating the + :class:`~pyvista.PolyData`. + + **kwargs : dict, optional + Additional keyword arguments to be passed to the relevant + :class:`~geovista.bridge.Transform` method (e.g ``zlevel``). + + Returns + ------- + :class:`~pyvista.PolyData` + The PolyData object representing the cube's spatial information and data. + + Raises + ------ + NotImplementedError + If a :class:`~iris.cube.Cube` with too many dimensions is passed. Only + the horizontal data can be represented, meaning a 2D Cube, or 1D Cube + if the horizontal space is described by + :class:`~iris.mesh.MeshCoord`\ s. + + Examples + -------- + .. testsetup:: + + from iris import load_cube, sample_data_path + + cube = load_cube(sample_data_path("air_temp.pp")) + cube_w_time = load_cube(sample_data_path("A1B_north_america.nc")) + cube_mesh = load_cube(sample_data_path("mesh_C4_synthetic_float.nc")) + + >>> from iris.experimental.geovista import cube_to_polydata + + Converting a standard 2-dimensional :class:`~iris.cube.Cube` with + 1-dimensional coordinates: + + >>> print(cube.summary(shorten=True)) + air_temperature / (K) (latitude: 73; longitude: 96) + >>> print(cube_to_polydata(cube)) + PolyData (... + N Cells: 7008 + N Points: 7178 + N Strips: 0 + X Bounds: -9.992e-01, 9.992e-01 + Y Bounds: -9.992e-01, 9.992e-01 + Z Bounds: -1.000e+00, 1.000e+00 + N Arrays: 4 + + Configure the conversion by passing additional keyword arguments: + + >>> print(cube_to_polydata(cube, radius=2)) + PolyData (... + N Cells: 7008 + N Points: 7178 + N Strips: 0 + X Bounds: -1.998e+00, 1.998e+00 + Y Bounds: -1.998e+00, 1.998e+00 + Z Bounds: -2.000e+00, 2.000e+00 + N Arrays: 4 + + Converting a :class:`~iris.cube.Cube` that has a + :attr:`~iris.cube.Cube.mesh` describing its horizontal space: + + >>> print(cube_mesh.summary(shorten=True)) + synthetic / (1) (-- : 96) + >>> print(cube_to_polydata(cube_mesh)) + PolyData (... + N Cells: 96 + N Points: 98 + N Strips: 0 + X Bounds: -1.000e+00, 1.000e+00 + Y Bounds: -1.000e+00, 1.000e+00 + Z Bounds: -1.000e+00, 1.000e+00 + N Arrays: 4 + + Remember to reduce the dimensionality of your :class:`~iris.cube.Cube` to + just be the horizontal space: + + >>> print(cube_w_time.summary(shorten=True)) + air_temperature / (K) (time: 240; latitude: 37; longitude: 49) + >>> print(cube_to_polydata(cube_w_time[0, :, :])) + PolyData (... 
+ N Cells: 1813 + N Points: 1900 + N Strips: 0 + X Bounds: -6.961e-01, 6.961e-01 + Y Bounds: -9.686e-01, -3.411e-01 + Z Bounds: 2.483e-01, 8.714e-01 + N Arrays: 4 + + """ + if cube.mesh: + if cube.ndim != 1: + raise NotImplementedError("Cubes with a mesh must be one dimensional") + lons, lats = cube.mesh.node_coords + face_node = cube.mesh.face_node_connectivity + indices = face_node.indices_by_location() + + polydata = Transform.from_unstructured( + xs=lons.points, + ys=lats.points, + connectivity=indices, + data=cube.data, + name=f"{cube.name()} / ({cube.units})", + start_index=face_node.start_index, + **kwargs, + ) + # TODO: Add support for point clouds + elif cube.ndim == 2: + x_coord = _get_coord(cube, "X") + y_coord = _get_coord(cube, "Y") + transform_kwargs = dict( + xs=x_coord.contiguous_bounds(), + ys=y_coord.contiguous_bounds(), + data=cube.data, + name=f"{cube.name()} / ({cube.units})", + **kwargs, + ) + coord_system = cube.coord_system() + if coord_system: + transform_kwargs["crs"] = coord_system.as_cartopy_crs().proj4_init + + if x_coord.ndim == 2 and y_coord.ndim == 2: + polydata = Transform.from_2d(**transform_kwargs) + + elif x_coord.ndim == 1 and y_coord.ndim == 1: + polydata = Transform.from_1d(**transform_kwargs) + + else: + raise NotImplementedError("Only 1D and 2D coordinates are supported") + else: + raise NotImplementedError("Cube must have a mesh or have 2 dimensions") + + return polydata + + +def extract_unstructured_region(cube, polydata, region, **kwargs): + """Index a :class:`~iris.cube.Cube` with a :attr:`~iris.cube.Cube.mesh` to a specific region. + + Uses :meth:`geovista.geodesic.BBox.enclosed` to identify the `cube` indices + that are within the specified region (`region` being a + :class:`~geovista.geodesic.BBox` class). + + Parameters + ---------- + cube : :class:`~iris.cube.Cube` + The cube to be indexed (must have a :attr:`~iris.cube.Cube.mesh`). + polydata : :class:`pyvista.PolyData` + A :class:`~pyvista.PolyData` representing the same horizontal space as + `cube`. The region extraction is first applied to `polydata`, with the + resulting indices then applied to `cube`. In many cases `polydata` can + be created by applying :func:`cube_to_polydata` to `cube`. + region : :class:`geovista.geodesic.BBox` + A :class:`~geovista.geodesic.BBox` representing the region to be + extracted. + **kwargs : dict, optional + Additional keyword arguments to be passed to the + :meth:`geovista.geodesic.BBox.enclosed` method (e.g ``preference``). + + Returns + ------- + :class:`~iris.cube.Cube` + The region extracted cube. + + Raises + ------ + ValueError + If `polydata` and the :attr:`~iris.cube.Cube.mesh` on `cube` do not + have the same shape. + + Examples + -------- + .. testsetup:: + + from iris import load_cube, sample_data_path + from iris.coords import AuxCoord + from iris.cube import CubeList + + file_path = sample_data_path("mesh_C4_synthetic_float.nc") + cube_w_mesh = load_cube(file_path) + + level_cubes = CubeList() + for height_level in range(72): + height_coord = AuxCoord([height_level], standard_name="height") + level_cube = cube_w_mesh.copy() + level_cube.add_aux_coord(height_coord) + level_cubes.append(level_cube) + + cube_w_mesh = level_cubes.merge_cube() + other_cube_w_mesh = cube_w_mesh[:20, :] + + The parameters of :func:`extract_unstructured_region` have been designed with + flexibility and reuse in mind. This is demonstrated below. 
+ + >>> from geovista.geodesic import BBox + >>> from iris.experimental.geovista import cube_to_polydata, extract_unstructured_region + >>> print(cube_w_mesh.shape) + (72, 96) + >>> # The mesh dimension represents the horizontal space of the cube. + >>> print(cube_w_mesh.shape[cube_w_mesh.mesh_dim()]) + 96 + >>> cube_polydata = cube_to_polydata(cube_w_mesh[0, :]) + >>> extracted_cube = extract_unstructured_region( + ... cube=cube_w_mesh, + ... polydata=cube_polydata, + ... region=BBox(lons=[0, 70, 70, 0], lats=[-25, -25, 45, 45]), + ... ) + >>> print(extracted_cube.shape) + (72, 11) + + Now reuse the same `cube` and `polydata` to extract a different region: + + >>> new_region = BBox(lons=[0, 35, 35, 0], lats=[-25, -25, 45, 45]) + >>> extracted_cube = extract_unstructured_region( + ... cube=cube_w_mesh, + ... polydata=cube_polydata, + ... region=new_region, + ... ) + >>> print(extracted_cube.shape) + (72, 6) + + Now apply the same region extraction to a different `cube` that has the + same horizontal shape: + + >>> print(other_cube_w_mesh.shape) + (20, 96) + >>> extracted_cube = extract_unstructured_region( + ... cube=other_cube_w_mesh, + ... polydata=cube_polydata, + ... region=new_region, + ... ) + >>> print(extracted_cube.shape) + (20, 6) + + Arbitrary keywords can be passed down to + :meth:`geovista.geodesic.BBox.enclosed` (``outside`` in this example): + + >>> extracted_cube = extract_unstructured_region( + ... cube=other_cube_w_mesh, + ... polydata=cube_polydata, + ... region=new_region, + ... outside=True, + ... ) + >>> print(extracted_cube.shape) + (20, 90) + + """ + if cube.mesh: + # Find what dimension the mesh is in on the cube + mesh_dim = cube.mesh_dim() + recreate_mesh = False + + if cube.location == "face": + polydata_length = polydata.GetNumberOfCells() + indices_key = VTK_CELL_IDS + recreate_mesh = True + elif cube.location == "node": + polydata_length = polydata.GetNumberOfPoints() + indices_key = VTK_POINT_IDS + else: + raise NotImplementedError( + f"cube.location must be `face` or `node`. Found: {cube.location}." + ) + + if cube.shape[mesh_dim] != polydata_length: + raise ValueError( + f"The mesh on the cube and the polydata" + f"must have the same shape." + f" Found Mesh: {cube.shape[mesh_dim]}," + f" Polydata: {polydata_length}." + ) + + region_polydata = region.enclosed(polydata, **kwargs) + indices = region_polydata[indices_key] + if len(indices) == 0: + raise IndexError("No part of `polydata` falls within `region`.") + + my_tuple = tuple( + [slice(None) if i != mesh_dim else indices for i in range(cube.ndim)] + ) + + region_cube = cube[my_tuple] + + if recreate_mesh: + coords_on_mesh_dim = region_cube.coords(dimensions=mesh_dim) + new_mesh = MeshXY.from_coords( + *[c for c in coords_on_mesh_dim if c.has_bounds()] + ) + + new_mesh_coords = new_mesh.to_MeshCoords(cube.location) + + for coord in new_mesh_coords: + region_cube.remove_coord(coord.name()) + region_cube.add_aux_coord(coord, mesh_dim) + + # TODO: Support unstructured point based data without a mesh + else: + raise ValueError("Cube must have a mesh") + + return region_cube diff --git a/lib/iris/experimental/raster.py b/lib/iris/experimental/raster.py index aca3fdadca..52ef2f651b 100644 --- a/lib/iris/experimental/raster.py +++ b/lib/iris/experimental/raster.py @@ -1,95 +1,91 @@ -# (C) British Crown Copyright 2013 - 2017, Met Office +# Copyright Iris contributors # -# This file is part of Iris. 
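[Editor's note] The new `iris.experimental.geovista` module above is motivated by plotting, yet neither docstring shows the final rendering step. A minimal sketch, assuming the same `air_temp.pp` sample data as the doctests and an interactive display; `GeoPlotter` inherits `add_mesh` and `show` from the PyVista plotter:

.. code::

    # Sketch only: render the PolyData produced by cube_to_polydata.
    import iris
    from geovista import GeoPlotter

    from iris.experimental.geovista import cube_to_polydata

    cube = iris.load_cube(iris.sample_data_path("air_temp.pp"))
    polydata = cube_to_polydata(cube)

    plotter = GeoPlotter()
    plotter.add_mesh(polydata)  # colours by the attached cube data array
    plotter.show()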
-# -# Iris is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the -# Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Iris is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with Iris. If not, see . -""" -Experimental module for importing/exporting raster data from Iris cubes using -the GDAL library. +# This file is part of Iris and is released under the BSD license. +# See LICENSE in the root of the repository for full licensing details. +"""Experimental module for importing/exporting raster data from Iris cubes using the GDAL library. -See also: `GDAL - Geospatial Data Abstraction Library `_. +See also: `GDAL - Geospatial Data Abstraction Library `_. TODO: If this module graduates from experimental the (optional) GDAL dependency should be added to INSTALL """ -from __future__ import (absolute_import, division, print_function) -from six.moves import (filter, input, map, range, zip) # noqa - +import cf_units import numpy as np import numpy.ma as ma from osgeo import gdal, osr -import cf_units - import iris +from iris._deprecation import warn_deprecated import iris.coord_systems +wmsg = ( + "iris.experimental.raster is deprecated since version 3.2, and will be " + "removed in a future release. If you make use of this functionality, " + "please contact the Iris Developers to discuss how to retain it (which may " + "involve reversing the deprecation)." +) +warn_deprecated(wmsg) _GDAL_DATATYPES = { - 'i2': gdal.GDT_Int16, - 'i4': gdal.GDT_Int32, - 'u1': gdal.GDT_Byte, - 'u2': gdal.GDT_UInt16, - 'u4': gdal.GDT_UInt32, - 'f4': gdal.GDT_Float32, - 'f8': gdal.GDT_Float64, + "i2": gdal.GDT_Int16, + "i4": gdal.GDT_Int32, + "u1": gdal.GDT_Byte, + "u2": gdal.GDT_UInt16, + "u4": gdal.GDT_UInt32, + "f4": gdal.GDT_Float32, + "f8": gdal.GDT_Float64, } -def _gdal_write_array(x_min, x_step, y_max, y_step, coord_system, data, fname, - ftype): - """ - Use GDAL WriteArray to export data as a 32-bit raster image. +def _gdal_write_array(x_min, x_step, y_max, y_step, coord_system, data, fname, ftype): + """Use GDAL WriteArray to export data as a 32-bit raster image. + Requires the array data to be of the form: North-at-top and West-on-left. - Args: - * x_min: Minimum X coordinate bounds value. - * x_step: Change in X coordinate per cell. - * y_max: Maximum Y coordinate bounds value. - * y_step: Change in Y coordinate per cell. - * coord_system (iris.coord_systems.CoordSystem): - Coordinate system for X and Y. - * data (numpy.ndarray): 2d array of values to export - * fname (string): Output file name. - * ftype (string): Export file type. - - .. note:: - - Projection information is currently not written to the output. + Parameters + ---------- + x_min : + Minimum X coordinate bounds value. + x_step : + Change in X coordinate per cell. + y_max : + Maximum Y coordinate bounds value. + y_step : + Change in Y coordinate per cell. + coord_system : iris.coord_systems.CoordSystem + Coordinate system for X and Y. + data : numpy.ndarray + 2d array of values to export. + fname : str + Output file name. + ftype : str + Export file type. + + Notes + ----- + Projection information is currently not written to the output. 
""" byte_order = data.dtype.str[0] format = data.dtype.str[1:] dtype = _GDAL_DATATYPES.get(format) if dtype is None: - raise ValueError('Unsupported data type: {}'.format(data.dtype)) + raise ValueError("Unsupported data type: {}".format(data.dtype)) driver = gdal.GetDriverByName(ftype) - gdal_dataset = driver.Create(fname, data.shape[1], data.shape[0], - 1, dtype) + gdal_dataset = driver.Create(fname, data.shape[1], data.shape[0], 1, dtype) # Where possible, set the projection. if coord_system is not None: srs = osr.SpatialReference() proj4_defn = coord_system.as_cartopy_crs().proj4_init # GDAL can't cope with "+proj=lonlat" which Cartopy produces. - proj4_defn = proj4_defn.replace('lonlat', 'longlat') + proj4_defn = proj4_defn.replace("lonlat", "longlat") if srs.ImportFromProj4(proj4_defn): - msg = 'Unsupported coordinate system: {}'.format(coord_system) + msg = "Unsupported coordinate system: {}".format(coord_system) raise ValueError(msg) gdal_dataset.SetProjection(srs.ExportToWkt()) @@ -103,63 +99,84 @@ def _gdal_write_array(x_min, x_step, y_max, y_step, coord_system, data, fname, data[data.mask] = data.fill_value band.SetNoDataValue(float(data.fill_value)) # GeoTIFF always needs little-endian data. - if byte_order == '>': - data = data.astype(data.dtype.newbyteorder('<')) + if byte_order == ">": + data = data.astype(data.dtype.newbyteorder("<")) band.WriteArray(data) def export_geotiff(cube, fname): - """ - Writes cube data to raster file format as a PixelIsArea GeoTiff image. + """Write cube data to raster file format as a PixelIsArea GeoTiff image. + + Parameters + ---------- + cube : Cube + The 2D regularly gridded cube slice to be exported. + The cube must have regular, contiguous bounds. + fname : str + Output file name. - Args: - * cube (Cube): The 2D regularly gridded cube slice to be exported. - The cube must have regular, contiguous bounds. - * fname (string): Output file name. + Notes + ----- + For more details on GeoTiff specification and PixelIsArea, see: + https://www.remotesensing.org/geotiff/spec/geotiff2.5.html#2.5.2.2 - .. note:: + .. deprecated:: 3.2.0 - For more details on GeoTiff specification and PixelIsArea, see: - http://www.remotesensing.org/geotiff/spec/geotiff2.5.html#2.5.2.2 + This method is scheduled to be removed in a future release, and no + replacement is currently planned. + If you make use of this functionality, please contact the Iris + Developers to discuss how to retain it (which could include reversing + the deprecation). """ + wmsg = ( + "iris.experimental.raster.export_geotiff has been deprecated, and will " + "be removed in a future release. Please consult the docstring for " + "details." 
+ ) + warn_deprecated(wmsg) + if cube.ndim != 2: raise ValueError("The cube must be two dimensional.") - coord_x = cube.coord(axis='X', dim_coords=True) - coord_y = cube.coord(axis='Y', dim_coords=True) + coord_x = cube.coord(axis="X", dim_coords=True) + coord_y = cube.coord(axis="Y", dim_coords=True) if coord_x.bounds is None or coord_y.bounds is None: - raise ValueError('Coordinates must have bounds, consider using ' - 'guess_bounds()') + raise ValueError("Coordinates must have bounds, consider using guess_bounds()") - if coord_x is None or coord_y is None or \ - coord_x.coord_system != coord_y.coord_system: - raise ValueError('The X and Y coordinates must share a CoordSystem.') + if ( + coord_x is None + or coord_y is None + or coord_x.coord_system != coord_y.coord_system + ): + raise ValueError("The X and Y coordinates must share a CoordSystem.") xy_step = [] for coord in [coord_x, coord_y]: name = coord.name() if coord.nbounds != 2: - msg = 'Coordinate {!r} must have two bounds ' \ - 'per point.'.format(name) + msg = "Coordinate {!r} must have two bounds per point.".format(name) raise ValueError(msg) - if not (coord.units == cf_units.Unit('degrees') or - coord.units.is_convertible('meters')): - raise ValueError('Coordinate {!r} units must be either degrees or ' - 'convertible to meters.'.format(name)) + if not ( + coord.units == cf_units.Unit("degrees") + or coord.units.is_convertible("meters") + ): + raise ValueError( + "Coordinate {!r} units must be either degrees or " + "convertible to meters.".format(name) + ) if not coord.is_contiguous(): - raise ValueError('Coordinate {!r} bounds must be ' - 'contiguous.'.format(name)) + raise ValueError("Coordinate {!r} bounds must be contiguous.".format(name)) xy_step.append(np.diff(coord.bounds[0])) if not np.allclose(np.diff(coord.bounds), xy_step[-1]): - msg = 'Coordinate {!r} bounds must be regularly ' \ - 'spaced.'.format(name) + msg = "Coordinate {!r} bounds must be regularly spaced.".format(name) raise ValueError(msg) if coord_x.points[0] > coord_x.points[-1]: - raise ValueError('Coordinate {!r} x-points must be monotonically' - 'increasing.'.format(name)) + raise ValueError( + "Coordinate {!r} x-points must be monotonically increasing.".format(name) + ) data = cube.data @@ -185,5 +202,4 @@ def export_geotiff(cube, fname): x_min = np.min(x_bounds) y_max = np.max(coord_y.bounds) - _gdal_write_array(x_min, x_step, y_max, y_step, coord_system, data, fname, - 'GTiff') + _gdal_write_array(x_min, x_step, y_max, y_step, coord_system, data, fname, "GTiff") diff --git a/lib/iris/experimental/regrid.py b/lib/iris/experimental/regrid.py index edcff3d3c4..4ffad43a2c 100644 --- a/lib/iris/experimental/regrid.py +++ b/lib/iris/experimental/regrid.py @@ -1,637 +1,59 @@ -# (C) British Crown Copyright 2013 - 2019, Met Office +# Copyright Iris contributors # -# This file is part of Iris. -# -# Iris is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the -# Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Iris is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with Iris. If not, see . -""" -Regridding functions. 
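[Editor's note] Taken together, the checks in `export_geotiff` above require a 2D cube whose X and Y coordinates share a coordinate system and carry regular, contiguous bounds. A usage sketch under those preconditions; the sample file and output name are assumptions, GDAL must be installed, and the import itself now triggers the deprecation warning added above:

.. code::

    # Sketch only: satisfy export_geotiff's preconditions, then write a
    # PixelIsArea GeoTIFF.
    import iris
    from iris.experimental.raster import export_geotiff

    cube = iris.load_cube(iris.sample_data_path("air_temp.pp"))
    for axis in ("x", "y"):
        coord = cube.coord(axis=axis, dim_coords=True)
        if coord.bounds is None:
            coord.guess_bounds()  # bounds must be regular and contiguous

    export_geotiff(cube, "air_temp.tif")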
+# This file is part of Iris and is released under the BSD license. +# See LICENSE in the root of the repository for full licensing details. +"""Regridding functions. + +Notes +----- +.. deprecated:: 3.2.0 + + This package will be removed in a future release. + The PointInCell class has now moved to :class:`iris.analysis.PointInCell`. + All the other content will be withdrawn. + + If you still use any of this, please contact the Iris Developers to + discuss how to replace it or to retain it. """ -from __future__ import (absolute_import, division, print_function) -from six.moves import (filter, input, map, range, zip) # noqa -import six -from collections import namedtuple import copy import functools import warnings import cartopy.crs as ccrs -import cf_units import numpy as np -import numpy.ma as ma import scipy.interpolate -import six -import iris.analysis.cartography -from iris.analysis._interpolation import (get_xy_dim_coords, get_xy_coords, - snapshot_grid) +from iris._deprecation import warn_deprecated +from iris.analysis._area_weighted import ( + _regrid_area_weighted_rectilinear_src_and_grid__perform, + _regrid_area_weighted_rectilinear_src_and_grid__prepare, +) +from iris.analysis._interpolation import get_xy_coords, get_xy_dim_coords, snapshot_grid from iris.analysis._regrid import ( - RectilinearRegridder, + _regrid_weighted_curvilinear_to_rectilinear__perform, _regrid_weighted_curvilinear_to_rectilinear__prepare, - _regrid_weighted_curvilinear_to_rectilinear__perform ) +import iris.analysis.cartography import iris.coord_systems import iris.cube from iris.util import _meshgrid +from iris.warnings import IrisImpossibleUpdateWarning + +wmsg = ( + "The 'iris.experimental.regrid' package is deprecated since version 3.2, " + "and will be removed in a future release. The PointInCell class has now " + "moved into iris.analysis. All its other content will be withdrawn. " + "If you still use any of this, please contact the Iris Developers to " + "discuss how to replace it or to retain it (reverse the deprecation)." +) +warn_deprecated(wmsg) -_Version = namedtuple('Version', ('major', 'minor', 'micro')) -_NP_VERSION = _Version(*(int(val) for val in - np.version.version.split('.') if val.isdigit())) - - -def _get_xy_coords(cube): - """ - Return the x and y coordinates from a cube. - - This function will preferentially return a pair of dimension - coordinates (if there are more than one potential x or y dimension - coordinates a ValueError will be raised). If the cube does not have - a pair of x and y dimension coordinates it will return 1D auxiliary - coordinates (including scalars). If there is not one and only one set - of x and y auxiliary coordinates a ValueError will be raised. - - Having identified the x and y coordinates, the function checks that they - have equal coordinate systems and that they do not occupy the same - dimension on the cube. - - Args: - - * cube: - An instance of :class:`iris.cube.Cube`. - - Returns: - A tuple containing the cube's x and y coordinates. - - """ - # Look for a suitable dimension coords first. - x_coords = cube.coords(axis='x', dim_coords=True) - if not x_coords: - # If there is no x coord in dim_coords look for scalars or - # monotonic coords in aux_coords. 
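[Editor's note] The module-level `warn_deprecated` call added above fires as soon as `iris.experimental.regrid` is imported. Iris deprecation warnings carry their own category, so code that is mid-migration can filter them precisely rather than silencing all warnings. A minimal sketch, assuming the `IrisDeprecation` category exposed by `iris._deprecation`:

.. code::

    # Sketch only: suppress just the Iris deprecation warning while callers
    # are being migrated away from this import.
    import warnings

    from iris._deprecation import IrisDeprecation

    with warnings.catch_warnings():
        warnings.simplefilter("ignore", IrisDeprecation)
        import iris.experimental.regrid  # emits IrisDeprecation on import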
- x_coords = [coord for coord in cube.coords(axis='x', dim_coords=False) - if coord.ndim == 1 and coord.is_monotonic()] - if len(x_coords) != 1: - raise ValueError('Cube {!r} must contain a single 1D x ' - 'coordinate.'.format(cube.name())) - x_coord = x_coords[0] - - # Look for a suitable dimension coords first. - y_coords = cube.coords(axis='y', dim_coords=True) - if not y_coords: - # If there is no y coord in dim_coords look for scalars or - # monotonic coords in aux_coords. - y_coords = [coord for coord in cube.coords(axis='y', dim_coords=False) - if coord.ndim == 1 and coord.is_monotonic()] - if len(y_coords) != 1: - raise ValueError('Cube {!r} must contain a single 1D y ' - 'coordinate.'.format(cube.name())) - y_coord = y_coords[0] - - if x_coord.coord_system != y_coord.coord_system: - raise ValueError("The cube's x ({!r}) and y ({!r}) " - "coordinates must have the same coordinate " - "system.".format(x_coord.name(), y_coord.name())) - - # The x and y coordinates must describe different dimensions - # or be scalar coords. - x_dims = cube.coord_dims(x_coord) - x_dim = None - if x_dims: - x_dim = x_dims[0] - - y_dims = cube.coord_dims(y_coord) - y_dim = None - if y_dims: - y_dim = y_dims[0] - - if x_dim is not None and y_dim == x_dim: - raise ValueError("The cube's x and y coords must not describe the " - "same data dimension.") - - return x_coord, y_coord - - -def _within_bounds(src_bounds, tgt_bounds, orderswap=False): - """ - Determine which target bounds lie within the extremes of the source bounds. - - Args: - - * src_bounds (ndarray): - An (n, 2) shaped array of monotonic contiguous source bounds. - * tgt_bounds (ndarray): - An (n, 2) shaped array corresponding to the target bounds. - - Kwargs: - - * orderswap (bool): - A Boolean indicating whether the target bounds are in descending order - (True). Defaults to False. - - Returns: - Boolean ndarray, indicating whether each target bound is within the - extremes of the source bounds. - - """ - min_bound = np.min(src_bounds) - 1e-14 - max_bound = np.max(src_bounds) + 1e-14 - - # Swap upper-lower is necessary. - if orderswap is True: - upper, lower = tgt_bounds.T - else: - lower, upper = tgt_bounds.T - - return (((lower <= max_bound) * (lower >= min_bound)) * - ((upper <= max_bound) * (upper >= min_bound))) - - -def _cropped_bounds(bounds, lower, upper): - """ - Return a new bounds array and corresponding slice object (or indices) of - the original data array, resulting from cropping the provided bounds - between the specified lower and upper values. The bounds at the - extremities will be truncated so that they start and end with lower and - upper. - - This function will return an empty NumPy array and slice if there is no - overlap between the region covered by bounds and the region from lower to - upper. - - If lower > upper the resulting bounds may not be contiguous and the - indices object will be a tuple of indices rather than a slice object. - - Args: - - * bounds: - An (n, 2) shaped array of monotonic contiguous bounds. - * lower: - Lower bound at which to crop the bounds array. - * upper: - Upper bound at which to crop the bounds array. - - Returns: - A tuple of the new bounds array and the corresponding slice object or - indices from the zeroth axis of the original array. - - """ - reversed_flag = False - # Ensure order is increasing. - if bounds[0, 0] > bounds[-1, 0]: - # Reverse bounds - bounds = bounds[::-1, ::-1] - reversed_flag = True - - # Number of bounds. 
- n = bounds.shape[0] - - if lower <= upper: - if lower > bounds[-1, 1] or upper < bounds[0, 0]: - new_bounds = bounds[0:0] - indices = slice(0, 0) - else: - # A single region lower->upper. - if lower < bounds[0, 0]: - # Region extends below bounds so use first lower bound. - l = 0 - lower = bounds[0, 0] - else: - # Index of last lower bound less than or equal to lower. - l = np.nonzero(bounds[:, 0] <= lower)[0][-1] - if upper > bounds[-1, 1]: - # Region extends above bounds so use last upper bound. - u = n - 1 - upper = bounds[-1, 1] - else: - # Index of first upper bound greater than or equal to - # upper. - u = np.nonzero(bounds[:, 1] >= upper)[0][0] - # Extract the bounds in our region defined by lower->upper. - new_bounds = np.copy(bounds[l:(u + 1), :]) - # Replace first and last values with specified bounds. - new_bounds[0, 0] = lower - new_bounds[-1, 1] = upper - if reversed_flag: - indices = slice(n - (u + 1), n - l) - else: - indices = slice(l, u + 1) - else: - # Two regions [0]->upper, lower->[-1] - # [0]->upper - if upper < bounds[0, 0]: - # Region outside src bounds. - new_bounds_left = bounds[0:0] - indices_left = tuple() - slice_left = slice(0, 0) - else: - if upper > bounds[-1, 1]: - # Whole of bounds. - u = n - 1 - upper = bounds[-1, 1] - else: - # Index of first upper bound greater than or equal to upper. - u = np.nonzero(bounds[:, 1] >= upper)[0][0] - # Extract the bounds in our region defined by [0]->upper. - new_bounds_left = np.copy(bounds[0:(u + 1), :]) - # Replace last value with specified bound. - new_bounds_left[-1, 1] = upper - if reversed_flag: - indices_left = tuple(range(n - (u + 1), n)) - slice_left = slice(n - (u + 1), n) - else: - indices_left = tuple(range(0, u + 1)) - slice_left = slice(0, u + 1) - # lower->[-1] - if lower > bounds[-1, 1]: - # Region is outside src bounds. - new_bounds_right = bounds[0:0] - indices_right = tuple() - slice_right = slice(0, 0) - else: - if lower < bounds[0, 0]: - # Whole of bounds. - l = 0 - lower = bounds[0, 0] - else: - # Index of last lower bound less than or equal to lower. - l = np.nonzero(bounds[:, 0] <= lower)[0][-1] - # Extract the bounds in our region defined by lower->[-1]. - new_bounds_right = np.copy(bounds[l:, :]) - # Replace first value with specified bound. - new_bounds_right[0, 0] = lower - if reversed_flag: - indices_right = tuple(range(0, n - l)) - slice_right = slice(0, n - l) - else: - indices_right = tuple(range(l, n)) - slice_right = slice(l, None) - - if reversed_flag: - # Flip everything around. - indices_left, indices_right = indices_right, indices_left - slice_left, slice_right = slice_right, slice_left - - # Combine regions. - new_bounds = np.concatenate((new_bounds_left, new_bounds_right)) - # Use slices if possible, but if we have two regions use indices. - if indices_left and indices_right: - indices = indices_left + indices_right - elif indices_left: - indices = slice_left - elif indices_right: - indices = slice_right - else: - indices = slice(0, 0) - - if reversed_flag: - new_bounds = new_bounds[::-1, ::-1] - - return new_bounds, indices - - -def _cartesian_area(y_bounds, x_bounds): - """ - Return an array of the areas of each cell given two arrays - of cartesian bounds. - - Args: - - * y_bounds: - An (n, 2) shaped NumPy array. - * x_bounds: - An (m, 2) shaped NumPy array. - - Returns: - An (n, m) shaped Numpy array of areas. 
- - """ - heights = y_bounds[:, 1] - y_bounds[:, 0] - widths = x_bounds[:, 1] - x_bounds[:, 0] - return np.abs(np.outer(heights, widths)) - - -def _spherical_area(y_bounds, x_bounds, radius=1.0): - """ - Return an array of the areas of each cell on a sphere - given two arrays of latitude and longitude bounds in radians. - - Args: - - * y_bounds: - An (n, 2) shaped NumPy array of latitide bounds in radians. - * x_bounds: - An (m, 2) shaped NumPy array of longitude bounds in radians. - * radius: - Radius of the sphere. Default is 1.0. - - Returns: - An (n, m) shaped Numpy array of areas. - - """ - return iris.analysis.cartography._quadrant_area( - y_bounds, x_bounds, radius) - - -def _get_bounds_in_units(coord, units, dtype): - """Return a copy of coord's bounds in the specified units and dtype.""" - # The bounds are cast to dtype before conversion to prevent issues when - # mixing float32 and float64 types. - return coord.units.convert(coord.bounds.astype(dtype), units).astype(dtype) - - -def _weighted_mean_with_mdtol(data, weights, axis=None, mdtol=0): - """ - Return the weighted mean of an array over the specified axis - using the provided weights (if any) and a permitted fraction of - masked data. - - Args: - - * data (array-like): - Data to be averaged. - - * weights (array-like): - An array of the same shape as the data that specifies the contribution - of each corresponding data element to the calculated mean. - - Kwargs: - - * axis (int or tuple of ints): - Axis along which the mean is computed. The default is to compute - the mean of the flattened array. - - * mdtol (float): - Tolerance of missing data. The value returned in each element of the - returned array will be masked if the fraction of masked data exceeds - mdtol. This fraction is weighted by the `weights` array if one is - provided. mdtol=0 means no missing data is tolerated - while mdtol=1 will mean the resulting element will be masked if and - only if all the contributing elements of data are masked. - Defaults to 0. - - Returns: - Numpy array (possibly masked) or scalar. - - """ - if ma.is_masked(data): - res, unmasked_weights_sum = ma.average(data, weights=weights, - axis=axis, returned=True) - if mdtol < 1: - weights_sum = weights.sum(axis=axis) - frac_masked = 1 - np.true_divide(unmasked_weights_sum, weights_sum) - mask_pt = frac_masked > mdtol - if np.any(mask_pt) and not isinstance(res, ma.core.MaskedConstant): - if np.isscalar(res): - res = ma.masked - elif ma.isMaskedArray(res): - res.mask |= mask_pt - else: - res = ma.masked_array(res, mask=mask_pt) - else: - res = np.average(data, weights=weights, axis=axis) - return res - - -def _regrid_area_weighted_array(src_data, x_dim, y_dim, - src_x_bounds, src_y_bounds, - grid_x_bounds, grid_y_bounds, - grid_x_decreasing, grid_y_decreasing, - area_func, circular=False, mdtol=0): - """ - Regrid the given data from its source grid to a new grid using - an area weighted mean to determine the resulting data values. - - .. note:: - - Elements in the returned array that lie either partially - or entirely outside of the extent of the source grid will - be masked irrespective of the value of mdtol. - - Args: - - * src_data: - An N-dimensional NumPy array. - * x_dim: - The X dimension within `src_data`. - * y_dim: - The Y dimension within `src_data`. - * src_x_bounds: - A NumPy array of bounds along the X axis defining the source grid. - * src_y_bounds: - A NumPy array of bounds along the Y axis defining the source grid. 
- * grid_x_bounds: - A NumPy array of bounds along the X axis defining the new grid. - * grid_y_bounds: - A NumPy array of bounds along the Y axis defining the new grid. - * grid_x_decreasing: - Boolean indicating whether the X coordinate of the new grid is - in descending order. - * grid_y_decreasing: - Boolean indicating whether the Y coordinate of the new grid is - in descending order. - * area_func: - A function that returns an (p, q) array of weights given an (p, 2) - shaped array of Y bounds and an (q, 2) shaped array of X bounds. - - Kwargs: - - * circular: - A boolean indicating whether the `src_x_bounds` are periodic. Default - is False. - - * mdtol: - Tolerance of missing data. The value returned in each element of the - returned array will be masked if the fraction of missing data exceeds - mdtol. This fraction is calculated based on the area of masked cells - within each target cell. mdtol=0 means no missing data is tolerated - while mdtol=1 will mean the resulting element will be masked if and - only if all the overlapping elements of the source grid are masked. - Defaults to 0. - - Returns: - The regridded data as an N-dimensional NumPy array. The lengths - of the X and Y dimensions will now match those of the target - grid. +def regrid_area_weighted_rectilinear_src_and_grid(src_cube, grid_cube, mdtol=0): + """Regrid using the area weighted mean of data values. - """ - # Create empty data array to match the new grid. - # Note that dtype is not preserved and that the array is - # masked to allow for regions that do not overlap. - new_shape = list(src_data.shape) - if x_dim is not None: - new_shape[x_dim] = grid_x_bounds.shape[0] - if y_dim is not None: - new_shape[y_dim] = grid_y_bounds.shape[0] - - # Use input cube dtype or convert values to the smallest possible float - # dtype when necessary. - dtype = np.promote_types(src_data.dtype, np.float16) - - # Flag to indicate whether the original data was a masked array. - src_masked = ma.isMaskedArray(src_data) - if src_masked: - new_data = ma.zeros(new_shape, fill_value=src_data.fill_value, - dtype=dtype) - else: - new_data = ma.zeros(new_shape, dtype=dtype) - # Assign to mask to explode it, allowing indexed assignment. - new_data.mask = False - - indices = [slice(None)] * new_data.ndim - - # Determine which grid bounds are within src extent. - y_within_bounds = _within_bounds(src_y_bounds, grid_y_bounds, - grid_y_decreasing) - x_within_bounds = _within_bounds(src_x_bounds, grid_x_bounds, - grid_x_decreasing) - - # Cache which src_bounds are within grid bounds - cached_x_bounds = [] - cached_x_indices = [] - for (x_0, x_1) in grid_x_bounds: - if grid_x_decreasing: - x_0, x_1 = x_1, x_0 - x_bounds, x_indices = _cropped_bounds(src_x_bounds, x_0, x_1) - cached_x_bounds.append(x_bounds) - cached_x_indices.append(x_indices) - - # Simple for loop approach. - for j, (y_0, y_1) in enumerate(grid_y_bounds): - # Reverse lower and upper if dest grid is decreasing. - if grid_y_decreasing: - y_0, y_1 = y_1, y_0 - y_bounds, y_indices = _cropped_bounds(src_y_bounds, y_0, y_1) - for i, (x_0, x_1) in enumerate(grid_x_bounds): - # Reverse lower and upper if dest grid is decreasing. - if grid_x_decreasing: - x_0, x_1 = x_1, x_0 - x_bounds = cached_x_bounds[i] - x_indices = cached_x_indices[i] - - # Determine whether to mask element i, j based on overlap with - # src. - # If x_0 > x_1 then we want [0]->x_1 and x_0->[0] + mod in the case - # of wrapped longitudes. However if the src grid is not global - # (i.e. 
circular) this new cell would include a region outside of - # the extent of the src grid and should therefore be masked. - outside_extent = x_0 > x_1 and not circular - if (outside_extent or not y_within_bounds[j] or not - x_within_bounds[i]): - # Mask out element(s) in new_data - if x_dim is not None: - indices[x_dim] = i - if y_dim is not None: - indices[y_dim] = j - new_data[tuple(indices)] = ma.masked - else: - # Calculate weighted mean of data points. - # Slice out relevant data (this may or may not be a view() - # depending on x_indices being a slice or not). - if x_dim is not None: - indices[x_dim] = x_indices - if y_dim is not None: - indices[y_dim] = y_indices - if isinstance(x_indices, tuple) and \ - isinstance(y_indices, tuple): - raise RuntimeError('Cannot handle split bounds ' - 'in both x and y.') - data = src_data[tuple(indices)] - - # Calculate weights based on areas of cropped bounds. - weights = area_func(y_bounds, x_bounds) - - # Numpy 1.7 allows the axis keyword arg to be a tuple. - # If the version of NumPy is less than 1.7 manipulate the axes - # of the data so the x and y dimensions can be flattened. - if _NP_VERSION.minor < 7: - if y_dim is not None and x_dim is not None: - flattened_shape = list(data.shape) - if y_dim > x_dim: - data = np.rollaxis(data, y_dim, data.ndim) - data = np.rollaxis(data, x_dim, data.ndim) - del flattened_shape[y_dim] - del flattened_shape[x_dim] - else: - data = np.rollaxis(data, x_dim, data.ndim) - data = np.rollaxis(data, y_dim, data.ndim) - del flattened_shape[x_dim] - del flattened_shape[y_dim] - weights = weights.T - flattened_shape.append(-1) - data = data.reshape(*flattened_shape) - elif y_dim is not None: - flattened_shape = list(data.shape) - del flattened_shape[y_dim] - flattened_shape.append(-1) - data = data.swapaxes(y_dim, -1).reshape( - *flattened_shape) - elif x_dim is not None: - flattened_shape = list(data.shape) - del flattened_shape[x_dim] - flattened_shape.append(-1) - data = data.swapaxes(x_dim, -1).reshape( - *flattened_shape) - weights = weights.ravel() - axis = -1 - else: - # Transpose weights to match dim ordering in data. - weights_shape_y = weights.shape[0] - weights_shape_x = weights.shape[1] - if x_dim is not None and y_dim is not None and \ - x_dim < y_dim: - weights = weights.T - # Broadcast the weights array to allow numpy's ma.average - # to be called. - weights_padded_shape = [1] * data.ndim - axes = [] - if y_dim is not None: - weights_padded_shape[y_dim] = weights_shape_y - axes.append(y_dim) - if x_dim is not None: - weights_padded_shape[x_dim] = weights_shape_x - axes.append(x_dim) - # Assign new shape to raise error on copy. - weights.shape = weights_padded_shape - # Broadcast weights to match shape of data. - _, weights = np.broadcast_arrays(data, weights) - # Axes of data over which the weighted mean is calculated. - axis = tuple(axes) - - # Calculate weighted mean taking into account missing data. - new_data_pt = _weighted_mean_with_mdtol( - data, weights=weights, axis=axis, mdtol=mdtol) - - # Insert data (and mask) values into new array. - if x_dim is not None: - indices[x_dim] = i - if y_dim is not None: - indices[y_dim] = j - new_data[tuple(indices)] = new_data_pt - - # Remove new mask if original data was not masked - # and no values in the new array are masked. 
- if not src_masked and not new_data.mask.any(): - new_data = new_data.data - - return new_data - - -def regrid_area_weighted_rectilinear_src_and_grid(src_cube, grid_cube, - mdtol=0): - """ Return a new cube with data values calculated using the area weighted mean of data values from src_grid regridded onto the horizontal grid of grid_cube. @@ -642,24 +64,15 @@ def regrid_area_weighted_rectilinear_src_and_grid(src_cube, grid_cube, also requires that the coordinates describing the horizontal grids all have bounds. - .. note:: - - Elements in data array of the returned cube that lie either partially - or entirely outside of the horizontal extent of the src_cube will - be masked irrespective of the value of mdtol. - - Args: - - * src_cube: + Parameters + ---------- + src_cube : :class:`iris.cube.Cube` An instance of :class:`iris.cube.Cube` that supplies the data, metadata and coordinates. - * grid_cube: + grid_cube : :class:`iris.cube.Cube` An instance of :class:`iris.cube.Cube` that supplies the desired horizontal grid definition. - - Kwargs: - - * mdtol: + mdtol : int, default=0 Tolerance of missing data. The value returned in each element of the returned cube's data array will be masked if the fraction of masked data in the overlapping cells of the source cube exceeds mdtol. This @@ -668,123 +81,51 @@ def regrid_area_weighted_rectilinear_src_and_grid(src_cube, grid_cube, will mean the resulting element will be masked if and only if all the overlapping cells of the source cube are masked. Defaults to 0. - Returns: - A new :class:`iris.cube.Cube` instance. + Returns + ------- + A new :class:`iris.cube.Cube` instance. + + Notes + ----- + Elements in data array of the returned cube that lie either partially + or entirely outside of the horizontal extent of the src_cube will + be masked irrespective of the value of mdtol. + + .. deprecated:: 3.2.0 + + This function is scheduled to be removed in a future release. + Please use :meth:`iris.cube.Cube.regrid` with the + :class:`iris.analysis.AreaWeighted` scheme instead : this is an exact + replacement. + + For example : + + .. code:: + + result = src_cube.regrid(grid_cube, AreaWeighted()) """ - # Get the 1d monotonic (or scalar) src and grid coordinates. - src_x, src_y = _get_xy_coords(src_cube) - grid_x, grid_y = _get_xy_coords(grid_cube) - - # Condition 1: All x and y coordinates must have contiguous bounds to - # define areas. - if not src_x.is_contiguous() or not src_y.is_contiguous() or \ - not grid_x.is_contiguous() or not grid_y.is_contiguous(): - raise ValueError("The horizontal grid coordinates of both the source " - "and grid cubes must have contiguous bounds.") - - # Condition 2: Everything must have the same coordinate system. - src_cs = src_x.coord_system - grid_cs = grid_x.coord_system - if src_cs != grid_cs: - raise ValueError("The horizontal grid coordinates of both the source " - "and grid cubes must have the same coordinate " - "system.") - - # Condition 3: cannot create vector coords from scalars. - src_x_dims = src_cube.coord_dims(src_x) - src_x_dim = None - if src_x_dims: - src_x_dim = src_x_dims[0] - src_y_dims = src_cube.coord_dims(src_y) - src_y_dim = None - if src_y_dims: - src_y_dim = src_y_dims[0] - if src_x_dim is None and grid_x.shape[0] != 1 or \ - src_y_dim is None and grid_y.shape[0] != 1: - raise ValueError('The horizontal grid coordinates of source cube ' - 'includes scalar coordinates, but the new grid does ' - 'not. 
The new grid must not require additional data ' - 'dimensions to be created.') - - # Determine whether to calculate flat or spherical areas. - # Don't only rely on coord system as it may be None. - spherical = (isinstance(src_cs, (iris.coord_systems.GeogCS, - iris.coord_systems.RotatedGeogCS)) or - src_x.units == 'degrees' or src_x.units == 'radians') - - # Get src and grid bounds in the same units. - x_units = cf_units.Unit('radians') if spherical else src_x.units - y_units = cf_units.Unit('radians') if spherical else src_y.units - - # Operate in highest precision. - src_dtype = np.promote_types(src_x.bounds.dtype, src_y.bounds.dtype) - grid_dtype = np.promote_types(grid_x.bounds.dtype, grid_y.bounds.dtype) - dtype = np.promote_types(src_dtype, grid_dtype) - - src_x_bounds = _get_bounds_in_units(src_x, x_units, dtype) - src_y_bounds = _get_bounds_in_units(src_y, y_units, dtype) - grid_x_bounds = _get_bounds_in_units(grid_x, x_units, dtype) - grid_y_bounds = _get_bounds_in_units(grid_y, y_units, dtype) - - # Determine whether target grid bounds are decreasing. This must - # be determined prior to wrap_lons being called. - grid_x_decreasing = grid_x_bounds[-1, 0] < grid_x_bounds[0, 0] - grid_y_decreasing = grid_y_bounds[-1, 0] < grid_y_bounds[0, 0] - - # Wrapping of longitudes. - if spherical: - base = np.min(src_x_bounds) - modulus = x_units.modulus - # Only wrap if necessary to avoid introducing floating - # point errors. - if np.min(grid_x_bounds) < base or \ - np.max(grid_x_bounds) > (base + modulus): - grid_x_bounds = iris.analysis.cartography.wrap_lons(grid_x_bounds, - base, modulus) - - # Determine whether the src_x coord has periodic boundary conditions. - circular = getattr(src_x, 'circular', False) - - # Use simple cartesian area function or one that takes into - # account the curved surface if coord system is spherical. - if spherical: - area_func = _spherical_area - else: - area_func = _cartesian_area - - # Calculate new data array for regridded cube. - new_data = _regrid_area_weighted_array(src_cube.data, src_x_dim, src_y_dim, - src_x_bounds, src_y_bounds, - grid_x_bounds, grid_y_bounds, - grid_x_decreasing, - grid_y_decreasing, - area_func, circular, mdtol) - - # Wrap up the data as a Cube. - # Create 2d meshgrids as required by _create_cube func. - meshgrid_x, meshgrid_y = _meshgrid(grid_x.points, grid_y.points) - regrid_callback = RectilinearRegridder._regrid - new_cube = RectilinearRegridder._create_cube(new_data, src_cube, - src_x_dim, src_y_dim, - src_x, src_y, grid_x, grid_y, - meshgrid_x, meshgrid_y, - regrid_callback) - - # Slice out any length 1 dimensions. - indices = [slice(None, None)] * new_data.ndim - if src_x_dim is not None and new_cube.shape[src_x_dim] == 1: - indices[src_x_dim] = 0 - if src_y_dim is not None and new_cube.shape[src_y_dim] == 1: - indices[src_y_dim] = 0 - if 0 in indices: - new_cube = new_cube[tuple(indices)] - - return new_cube + wmsg = ( + "The function " + "'iris.experimental.regrid." + "regrid_area_weighted_rectilinear_src_and_grid' " + "has been deprecated, and will be removed in a future release. " + "Please consult the docstring for details." + ) + warn_deprecated(wmsg) + + regrid_info = _regrid_area_weighted_rectilinear_src_and_grid__prepare( + src_cube, grid_cube + ) + result = _regrid_area_weighted_rectilinear_src_and_grid__perform( + src_cube, regrid_info, mdtol + ) + return result def regrid_weighted_curvilinear_to_rectilinear(src_cube, weights, grid_cube): - """ + r"""Regrid using the weighted mean and the weights. 
+ Return a new cube with the data values calculated using the weighted mean of data values from :data:`src_cube` and the weights from :data:`weights` regridded onto the horizontal grid of :data:`grid_cube`. @@ -808,38 +149,62 @@ def regrid_weighted_curvilinear_to_rectilinear(src_cube, weights, grid_cube): :math:`\sum (src\_cube.data_{ij} * weights_{ij}) / \sum weights_{ij}`, for all :math:`ij` :data:`src_cube` points that are bound by that cell. - .. warning:: - - * All coordinates that span the :data:`src_cube` that don't define - the horizontal curvilinear grid will be ignored. - - Args: + Warnings + -------- + All coordinates that span the :data:`src_cube` that don't define + the horizontal curvilinear grid will be ignored. - * src_cube: + Parameters + ---------- + src_cube : :class:`iris.cube.Cube` A :class:`iris.cube.Cube` instance that defines the source variable grid to be regridded. - * weights (array or None): + weights : array or None A :class:`numpy.ndarray` instance that defines the weights for the source variable grid cells. Must have the same shape as the X and Y coordinates. If weights is None, all-ones will be used. - * grid_cube: + grid_cube : :class:`iris.cube.Cube` A :class:`iris.cube.Cube` instance that defines the target rectilinear grid. - Returns: - A :class:`iris.cube.Cube` instance. + Returns + ------- + A :class:`iris.cube.Cube` instance. + + Notes + ----- + .. deprecated:: 3.2.0 + + This function is scheduled to be removed in a future release. + Please use :meth:`iris.cube.Cube.regrid` with the + :class:`iris.analysis.PointInCell` scheme instead : this is an exact + replacement. + + For example : + + .. code:: + + result = src_cube.regrid(grid_cube, PointInCell()) """ - regrid_info = \ - _regrid_weighted_curvilinear_to_rectilinear__prepare( - src_cube, weights, grid_cube) - result = _regrid_weighted_curvilinear_to_rectilinear__perform( - src_cube, regrid_info) + wmsg = ( + "The function " + "'iris.experimental.regrid." + "regrid_weighted_curvilinear_to_rectilinear' " + "has been deprecated, and will be removed in a future release. " + "Please consult the docstring for details." + ) + warn_deprecated(wmsg) + regrid_info = _regrid_weighted_curvilinear_to_rectilinear__prepare( + src_cube, weights, grid_cube + ) + result = _regrid_weighted_curvilinear_to_rectilinear__perform(src_cube, regrid_info) return result -class PointInCell(object): - """ +class PointInCell: + """Describe the point-in-cell regridding scheme. + This class describes the point-in-cell regridding scheme for use typically with :meth:`iris.cube.Cube.regrid()`. @@ -851,10 +216,9 @@ class PointInCell(object): :class:`iris.analysis.PointInCell`. """ + def __init__(self, weights=None): - """ - Point-in-cell regridding scheme suitable for regridding over one - or more orthogonal coordinates. + """Point-in-cell regridding scheme for regridding over one or more orthogonal coordinates. .. warning:: @@ -866,32 +230,28 @@ def __init__(self, weights=None): """ raise Exception( 'The class "iris.experimental.PointInCell" has been ' - 'moved, and is now in iris.analysis' - '\nPlease replace ' + "moved, and is now in iris.analysis" + "\nPlease replace " '"iris.experimental.PointInCell" with ' - '"iris.analysis.PointInCell".') + '"iris.analysis.PointInCell".' + ) -class _ProjectedUnstructuredRegridder(object): - """ - This class provides regridding that uses scipy.interpolate.griddata. 
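[Editor's note] The two deprecation notices above each name an exact replacement in `iris.analysis`; the only signature change is that `mdtol` and `weights` move into the scheme constructors. A migration sketch in which `src_cube`, `grid_cube` and `weights` are placeholder objects:

.. code::

    # Migration sketch (src_cube, grid_cube and weights are placeholders).
    from iris.analysis import AreaWeighted, PointInCell

    # was: regrid_area_weighted_rectilinear_src_and_grid(src_cube, grid_cube,
    #                                                    mdtol=0.5)
    result = src_cube.regrid(grid_cube, AreaWeighted(mdtol=0.5))

    # was: regrid_weighted_curvilinear_to_rectilinear(src_cube, weights,
    #                                                 grid_cube)
    result = src_cube.regrid(grid_cube, PointInCell(weights))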
+class _ProjectedUnstructuredRegridder: + """Regridding that uses scipy.interpolate.griddata.""" - """ - def __init__(self, src_cube, tgt_grid_cube, method, - projection=None): - """ - Create a regridder for conversions between the source - and target grids. - - Args: + def __init__(self, src_cube, tgt_grid_cube, method, projection=None): + """Create a regridder for conversions between the source and target grids. - * src_cube: + Parameters + ---------- + src_cube : :class:`~iris.cube.Cube` The :class:`~iris.cube.Cube` providing the source points. - * tgt_grid_cube: + tgt_grid_cube : :class:`~iris.cube.Cube` The :class:`~iris.cube.Cube` providing the target grid. - * method: + method : Either 'linear' or 'nearest'. - * projection: + projection : optional The projection in which the interpolation is performed. If None, a PlateCarree projection is used. Defaults to None. @@ -911,25 +271,33 @@ def __init__(self, src_cube, tgt_grid_cube, method, self._check_units(coord) # Whether to use linear or nearest-neighbour interpolation. - if method not in ('linear', 'nearest'): - msg = 'Regridding method {!r} not supported.'.format(method) + if method not in ("linear", "nearest"): + msg = "Regridding method {!r} not supported.".format(method) raise ValueError(msg) self._method = method src_x_coord, src_y_coord = get_xy_coords(src_cube) if src_x_coord.coord_system != src_y_coord.coord_system: - raise ValueError("'src_cube' lateral geographic coordinates have " - "differing coordinate sytems.") + raise ValueError( + "'src_cube' lateral geographic coordinates have " + "differing coordinate systems." + ) if src_x_coord.coord_system is None: - raise ValueError("'src_cube' lateral geographic coordinates have " - "no coordinate sytem.") + raise ValueError( + "'src_cube' lateral geographic coordinates have " + "no coordinate system." + ) tgt_x_coord, tgt_y_coord = get_xy_dim_coords(tgt_grid_cube) if tgt_x_coord.coord_system != tgt_y_coord.coord_system: - raise ValueError("'tgt_grid_cube' lateral geographic coordinates " - "have differing coordinate sytems.") + raise ValueError( + "'tgt_grid_cube' lateral geographic coordinates " + "have differing coordinate systems." + ) if tgt_x_coord.coord_system is None: - raise ValueError("'tgt_grid_cube' lateral geographic coordinates " - "have no coordinate sytem.") + raise ValueError( + "'tgt_grid_cube' lateral geographic coordinates " + "have no coordinate system." + ) if projection is None: globe = src_x_coord.coord_system.as_cartopy_globe() @@ -940,47 +308,60 @@ def _check_units(self, coord): if coord.coord_system is None: # No restriction on units. pass - elif isinstance(coord.coord_system, - (iris.coord_systems.GeogCS, - iris.coord_systems.RotatedGeogCS)): + elif isinstance( + coord.coord_system, + (iris.coord_systems.GeogCS, iris.coord_systems.RotatedGeogCS), + ): # Units for lat-lon or rotated pole must be 'degrees'. Note # that 'degrees_east' etc. are equal to 'degrees'. - if coord.units != 'degrees': - msg = "Unsupported units for coordinate system. " \ - "Expected 'degrees' got {!r}.".format(coord.units) + if coord.units != "degrees": + msg = ( + "Unsupported units for coordinate system. " + "Expected 'degrees' got {!r}.".format(coord.units) + ) raise ValueError(msg) else: # Units for other coord systems must be equal to metres. - if coord.units != 'm': - msg = "Unsupported units for coordinate system. " \ - "Expected 'metres' got {!r}.".format(coord.units) + if coord.units != "m": + msg = ( + "Unsupported units for coordinate system. 
" + "Expected 'metres' got {!r}.".format(coord.units) + ) raise ValueError(msg) @staticmethod - def _regrid(src_data, xy_dim, src_x_coord, src_y_coord, - tgt_x_coord, tgt_y_coord, - projection, method): - """ - Regrids input data from the source to the target. Calculation is. - - """ + def _regrid( + src_data, + xy_dim, + src_x_coord, + src_y_coord, + tgt_x_coord, + tgt_y_coord, + projection, + method, + ): + """Regrid input data from the source to the target. Calculation is.""" # Transform coordinates into the projection the interpolation will be # performed in. src_projection = src_x_coord.coord_system.as_cartopy_projection() projected_src_points = projection.transform_points( - src_projection, src_x_coord.points, src_y_coord.points) + src_projection, src_x_coord.points, src_y_coord.points + ) tgt_projection = tgt_x_coord.coord_system.as_cartopy_projection() tgt_x, tgt_y = _meshgrid(tgt_x_coord.points, tgt_y_coord.points) - projected_tgt_grid = projection.transform_points( - tgt_projection, tgt_x, tgt_y) + projected_tgt_grid = projection.transform_points(tgt_projection, tgt_x, tgt_y) # Prepare the result data array. # XXX TODO: Deal with masked src_data - tgt_y_shape, = tgt_y_coord.shape - tgt_x_shape, = tgt_x_coord.shape - tgt_shape = src_data.shape[:xy_dim] + (tgt_y_shape,) + (tgt_x_shape,) \ - + src_data.shape[xy_dim+1:] + (tgt_y_shape,) = tgt_y_coord.shape + (tgt_x_shape,) = tgt_x_coord.shape + tgt_shape = ( + src_data.shape[:xy_dim] + + (tgt_y_shape,) + + (tgt_x_shape,) + + src_data.shape[xy_dim + 1 :] + ) data = np.empty(tgt_shape, dtype=src_data.dtype) iter_shape = list(src_data.shape) @@ -990,55 +371,66 @@ def _regrid(src_data, xy_dim, src_x_coord, src_y_coord, src_index = list(index) src_index[xy_dim] = slice(None) src_subset = src_data[tuple(src_index)] - tgt_index = index[:xy_dim] + (slice(None), slice(None)) \ - + index[xy_dim+1:] + tgt_index = ( + index[:xy_dim] + (slice(None), slice(None)) + index[xy_dim + 1 :] + ) data[tgt_index] = scipy.interpolate.griddata( - projected_src_points[..., :2], src_subset, + projected_src_points[..., :2], + src_subset, (projected_tgt_grid[..., 0], projected_tgt_grid[..., 1]), - method=method) + method=method, + ) data = np.ma.array(data, mask=np.isnan(data)) return data - def _create_cube(self, data, src, src_xy_dim, src_x_coord, src_y_coord, - grid_x_coord, grid_y_coord, - regrid_callback): - """ - Return a new Cube for the result of regridding the source Cube onto - the new grid. + def _create_cube( + self, + data, + src, + src_xy_dim, + src_x_coord, + src_y_coord, + grid_x_coord, + grid_y_coord, + regrid_callback, + ): + """Return a new Cube for the result of regridding the source Cube onto the new grid. All the metadata and coordinates of the result Cube are copied from the source Cube, with two exceptions: - - Grid dimension coordinates are copied from the grid Cube. - - Auxiliary coordinates which span the grid dimensions are - ignored, except where they provide a reference surface for an - :class:`iris.aux_factory.AuxCoordFactory`. - Args: + * Grid dimension coordinates are copied from the grid Cube. + * Auxiliary coordinates which span the grid dimensions are + ignored, except where they provide a reference surface for an + :class:`iris.aux_factory.AuxCoordFactory`. - * data: + Parameters + ---------- + data : The regridded data as an N-dimensional NumPy array. - * src: + src : :class:`~iris.cube.Cube` The source Cube. - * src_xy_dim: + src_xy_dim : The dimension the X and Y coord span within the source Cube. 
- * src_x_coord: + src_x_coord : The X coordinate (either :class:`iris.coords.AuxCoord` or :class:`iris.coords.DimCoord`). - * src_y_coord: + src_y_coord : The Y coordinate (either :class:`iris.coords.AuxCoord` or :class:`iris.coords.DimCoord`). - * grid_x_coord: + grid_x_coord : The :class:`iris.coords.DimCoord` for the new grid's X coordinate. - * grid_y_coord: + grid_y_coord : The :class:`iris.coords.DimCoord` for the new grid's Y coordinate. - * regrid_callback: + regrid_callback : The routine that will be used to calculate the interpolated values of any reference surfaces. - Returns: - The new, regridded Cube. + Returns + ------- + The new, regridded Cube. """ # Create a result cube with the appropriate metadata @@ -1073,58 +465,77 @@ def copy_coords(src_coords, add_method): copy_coords(src.dim_coords, result.add_dim_coord) copy_coords(src.aux_coords, result.add_aux_coord) - def regrid_reference_surface(src_surface_coord, surface_dims, - src_xy_dim, src_x_coord, src_y_coord, - grid_x_coord, grid_y_coord, - regrid_callback): + def regrid_reference_surface( + src_surface_coord, + surface_dims, + src_xy_dim, + src_x_coord, + src_y_coord, + grid_x_coord, + grid_y_coord, + regrid_callback, + ): # Determine which of the reference surface's dimensions span the X # and Y dimensions of the source cube. surface_xy_dim = surface_dims.index(src_xy_dim) - surface = regrid_callback(src_surface_coord.points, surface_xy_dim, - src_x_coord, src_y_coord, - grid_x_coord, grid_y_coord) + surface = regrid_callback( + src_surface_coord.points, + surface_xy_dim, + src_x_coord, + src_y_coord, + grid_x_coord, + grid_y_coord, + ) surface_coord = src_surface_coord.copy(surface) return surface_coord # Copy across any AuxFactory instances, and regrid their reference # surfaces where required. for factory in src.aux_factories: - for coord in six.itervalues(factory.dependencies): + for coord in factory.dependencies.values(): if coord is None: continue dims = src.coord_dims(coord) if src_xy_dim in dims: - result_coord = regrid_reference_surface(coord, dims, - src_xy_dim, - src_x_coord, - src_y_coord, - grid_x_coord, - grid_y_coord, - regrid_callback) - result.add_aux_coord(result_coord, (dims[0], dims[0]+1)) + result_coord = regrid_reference_surface( + coord, + dims, + src_xy_dim, + src_x_coord, + src_y_coord, + grid_x_coord, + grid_y_coord, + regrid_callback, + ) + result.add_aux_coord(result_coord, (dims[0], dims[0] + 1)) coord_mapping[id(coord)] = result_coord try: result.add_aux_factory(factory.updated(coord_mapping)) except KeyError: - msg = 'Cannot update aux_factory {!r} because of dropped' \ - ' coordinates.'.format(factory.name()) - warnings.warn(msg) + msg = ( + "Cannot update aux_factory {!r} because of dropped" + " coordinates.".format(factory.name()) + ) + warnings.warn(msg, category=IrisImpossibleUpdateWarning) return result def __call__(self, src_cube): - """ + """Regrid to the target grid. + Regrid this :class:`~iris.cube.Cube` on to the target grid of this :class:`UnstructuredProjectedRegridder`. The given cube must be defined with the same grid as the source grid used to create this :class:`UnstructuredProjectedRegridder`. - Args: - - * src_cube: + Parameters + ---------- + src_cube : :class:`~iris.cube.Cube` A :class:`~iris.cube.Cube` to be regridded. - Returns: + Returns + ------- + :class:`~iris.cube.Cube` A cube defined with the horizontal dimensions of the target and the other dimensions from this cube. 
The data values of this cube will be converted to values on the new grid using @@ -1138,48 +549,64 @@ def __call__(self, src_cube): src_x_coord, src_y_coord = get_xy_coords(src_cube) tgt_x_coord, tgt_y_coord = self._tgt_grid src_cs = src_x_coord.coord_system - tgt_cs = tgt_x_coord.coord_system if src_x_coord.coord_system != src_y_coord.coord_system: - raise ValueError("'src' lateral geographic coordinates have " - "differing coordinate sytems.") + raise ValueError( + "'src' lateral geographic coordinates have " + "differing coordinate systems." + ) if src_cs is None: - raise ValueError("'src' lateral geographic coordinates have " - "no coordinate sytem.") + raise ValueError( + "'src' lateral geographic coordinates have no coordinate system." + ) # Check the source grid units. for coord in (src_x_coord, src_y_coord): self._check_units(coord) - src_x_dim, = src_cube.coord_dims(src_x_coord) - src_y_dim, = src_cube.coord_dims(src_y_coord) + (src_x_dim,) = src_cube.coord_dims(src_x_coord) + (src_y_dim,) = src_cube.coord_dims(src_y_coord) if src_x_dim != src_y_dim: - raise ValueError("'src' lateral geographic coordinates should map " - "the same dimension.") + raise ValueError( + "'src' lateral geographic coordinates should map the same dimension." + ) src_xy_dim = src_x_dim # Compute the interpolated data values. - data = self._regrid(src_cube.data, src_xy_dim, - src_x_coord, src_y_coord, - tgt_x_coord, tgt_y_coord, - self._projection, method=self._method) + data = self._regrid( + src_cube.data, + src_xy_dim, + src_x_coord, + src_y_coord, + tgt_x_coord, + tgt_y_coord, + self._projection, + method=self._method, + ) # Wrap up the data as a Cube. - regrid_callback = functools.partial(self._regrid, - method=self._method, - projection=self._projection) - - new_cube = self._create_cube(data, src_cube, src_xy_dim, - src_x_coord, src_y_coord, - tgt_x_coord, tgt_y_coord, - regrid_callback) + regrid_callback = functools.partial( + self._regrid, method=self._method, projection=self._projection + ) + + new_cube = self._create_cube( + data, + src_cube, + src_xy_dim, + src_x_coord, + src_y_coord, + tgt_x_coord, + tgt_y_coord, + regrid_callback, + ) return new_cube -class ProjectedUnstructuredLinear(object): - """ +class ProjectedUnstructuredLinear: + """Describe the linear regridding scheme. + This class describes the linear regridding scheme which uses the scipy.interpolate.griddata to regrid unstructured data on to a grid. @@ -1187,23 +614,42 @@ class ProjectedUnstructuredLinear(object): projection for the scipy calculation to be performed. """ + def __init__(self, projection=None): - """ + """Linear regridding scheme. + Linear regridding scheme that uses scipy.interpolate.griddata on projected unstructured data. - Optional Args: - - * projection: `cartopy.crs instance` + Parameters + ---------- + projection : `cartopy.crs` instance, optional The projection that the scipy calculation is performed in. If None is given, a PlateCarree projection is used. Defaults to None. + Notes + ----- + .. deprecated:: 3.2.0 + + This class is scheduled to be removed in a future release, and no + replacement is currently planned. + If you make use of this functionality, please contact the Iris + Developers to discuss how to retain it (which could include + reversing the deprecation). + """ self.projection = projection + wmsg = ( + "The class iris.experimental.regrid.ProjectedUnstructuredLinear " + "has been deprecated, and will be removed in a future release. " + "Please consult the docstring for details." 
+ ) + warn_deprecated(wmsg) def regridder(self, src_cube, target_grid): - """ + """Create a linear regridder to perform regridding. + Creates a linear regridder to perform regridding, using scipy.interpolate.griddata from unstructured source points to the target grid. The regridding calculation is performed in the given @@ -1214,16 +660,20 @@ def regridder(self, src_cube, target_grid): constructing your own regridder is preferable. These are detailed in the :ref:`user guide <interpolation_and_regridding>`. - Args: + Does not support lazy regridding. - * src_cube: + Parameters + ---------- + src_cube : :class:`~iris.cube.Cube` The :class:`~iris.cube.Cube` defining the unstructured source points. - * target_grid: + target_grid : :class:`~iris.cube.Cube` The :class:`~iris.cube.Cube` defining the target grid. - Returns: - A callable with the interface: + Returns + ------- + callable + A callable with the interface:: `callable(cube)` @@ -1231,43 +681,63 @@ def regridder(self, src_cube, target_grid): that is to be regridded to the `target_grid`. """ - return _ProjectedUnstructuredRegridder(src_cube, target_grid, - 'linear', self.projection) + return _ProjectedUnstructuredRegridder( + src_cube, target_grid, "linear", self.projection + ) -class ProjectedUnstructuredNearest(object): - """ +class ProjectedUnstructuredNearest: + """Describe the nearest regridding scheme which uses scipy.interpolate.griddata. + This class describes the nearest regridding scheme which uses the scipy.interpolate.griddata to regrid unstructured data on to a grid. The source cube and the target cube will be projected into a common projection for the scipy calculation to be performed. - .. Note:: + .. note:: The :class:`iris.analysis.UnstructuredNearest` scheme performs essentially the same job. That calculation is more rigorously correct and may be applied to larger data regions (including global). This one, however, where applicable, is substantially faster. """ - def __init__(self, projection=None): - """ - Nearest regridding scheme that uses scipy.interpolate.griddata on - projected unstructured data. - Optional Args: + def __init__(self, projection=None): + """Nearest regridding scheme that uses scipy.interpolate.griddata on projected unstructured data. - * projection: `cartopy.crs instance` + Parameters + ---------- + projection : `cartopy.crs instance`, optional The projection that the scipy calculation is performed in. If None is given, a PlateCarree projection is used. Defaults to None. + Notes + ----- + .. deprecated:: 3.2.0 + + This class is scheduled to be removed in a future release, and no + exact replacement is currently planned. + Please use :class:`iris.analysis.UnstructuredNearest` instead, if + possible. If you have a need for this exact functionality, please + contact the Iris Developers to discuss how to retain it (which + could include reversing the deprecation). + + """ self.projection = projection + wmsg = ( + "iris.experimental.regrid.ProjectedUnstructuredNearest has been " + "deprecated, and will be removed in a future release. " + "Please use 'iris.analysis.UnstructuredNearest' instead, where " + "possible. Consult the docstring for details." + ) + warn_deprecated(wmsg) def regridder(self, src_cube, target_grid): - """ - Creates a nearest-neighbour regridder to perform regridding, using + """Create a nearest-neighbour regridder to perform regridding. + + Create a nearest-neighbour regridder to perform regridding, using scipy.interpolate.griddata from unstructured source points to the target grid.
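A usage sketch of the scheme/regridder interface being documented here. The cube set-up is hypothetical — any unstructured source cube and rectilinear target grid with matching coordinate systems and 'degrees' units would do — and it assumes an Iris release in which this deprecated experimental class still exists::

    import numpy as np
    from iris.coord_systems import GeogCS
    from iris.coords import AuxCoord, DimCoord
    from iris.cube import Cube
    from iris.experimental.regrid import ProjectedUnstructuredNearest

    cs = GeogCS(6371229)
    rng = np.random.default_rng(0)

    # Hypothetical unstructured source: 100 scattered points on one dimension.
    src_cube = Cube(rng.random(100), long_name="sample")
    for name, span in [("longitude", 180.0), ("latitude", 90.0)]:
        points = rng.uniform(-span, span, 100)
        coord = AuxCoord(points, standard_name=name, units="degrees", coord_system=cs)
        src_cube.add_aux_coord(coord, 0)

    # Hypothetical 8 x 10 rectilinear target grid.
    grid_cube = Cube(np.zeros((8, 10)))
    grid_cube.add_dim_coord(
        DimCoord(np.linspace(-75, 75, 8), standard_name="latitude",
                 units="degrees", coord_system=cs), 0)
    grid_cube.add_dim_coord(
        DimCoord(np.linspace(-162, 162, 10), standard_name="longitude",
                 units="degrees", coord_system=cs), 1)

    # Build the regridder once; reuse it for every cube on the same source grid.
    regridder = ProjectedUnstructuredNearest().regridder(src_cube, grid_cube)
    result = regridder(src_cube)  # emits the deprecation warning, then regrids

The same pattern applies to ProjectedUnstructuredLinear, which substitutes linear for nearest-neighbour interpolation.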
The regridding calculation is performed in the given projection. @@ -1277,16 +747,20 @@ def regridder(self, src_cube, target_grid): constructing your own regridder is preferable. These are detailed in the :ref:`user guide <interpolation_and_regridding>`. - Args: + Does not support lazy regridding. - * src_cube: + Parameters + ---------- + src_cube : :class:`~iris.cube.Cube` The :class:`~iris.cube.Cube` defining the unstructured source points. - * target_grid: + target_grid : :class:`~iris.cube.Cube` The :class:`~iris.cube.Cube` defining the target grid. - Returns: - A callable with the interface: + Returns + ------- + callable + A callable with the interface:: `callable(cube)` @@ -1294,5 +768,6 @@ def regridder(self, src_cube, target_grid): that is to be regridded to the `target_grid`. """ - return _ProjectedUnstructuredRegridder(src_cube, target_grid, - 'nearest', self.projection) + return _ProjectedUnstructuredRegridder( + src_cube, target_grid, "nearest", self.projection + ) diff --git a/lib/iris/experimental/regrid_conservative.py b/lib/iris/experimental/regrid_conservative.py index d8dc2219a0..c4dbf965f8 100644 --- a/lib/iris/experimental/regrid_conservative.py +++ b/lib/iris/experimental/regrid_conservative.py @@ -1,43 +1,44 @@ -# (C) British Crown Copyright 2013 - 2017, Met Office +# Copyright Iris contributors # -# This file is part of Iris. -# -# Iris is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the -# Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Iris is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with Iris. If not, see <http://www.gnu.org/licenses/>. -""" -Support for conservative regridding via ESMPy. +# This file is part of Iris and is released under the BSD license. +# See LICENSE in the root of the repository for full licensing details. +"""Support for conservative regridding via ESMPy. + +.. deprecated:: 3.2.0 + + This package will be removed in a future release. + Please use + `iris-esmf-regrid <https://github.com/SciTools-incubator/iris-esmf-regrid>`_ + instead. """ -from __future__ import (absolute_import, division, print_function) -from six.moves import (filter, input, map, range, zip) # noqa +import functools import cartopy.crs as ccrs import numpy as np import iris +from iris._deprecation import warn_deprecated from iris.analysis._interpolation import get_xy_dim_coords -from iris.analysis._regrid import RectilinearRegridder +from iris.analysis._regrid import RectilinearRegridder, _create_cube from iris.util import _meshgrid +wmsg = ( + "The 'iris.experimental.regrid_conservative' package is deprecated since " + "version 3.2, and will be removed in a future release. Please use " + "iris-esmf-regrid instead. " + "See https://github.com/SciTools-incubator/iris-esmf-regrid." +) +warn_deprecated(wmsg) + #: A static Cartopy Geodetic() instance for transforming to true-lat-lons. _CRS_TRUELATLON = ccrs.Geodetic() def _convert_latlons(crs, x_array, y_array): - """ - Convert x+y coords in a given crs to (x,y) values in true-lat-lons. + """Convert x+y coords in a given crs to (x,y) values in true-lat-lons. ..
note:: @@ -49,27 +50,23 @@ def _convert_latlons(crs, x_array, y_array): return ll_values[..., 0], ll_values[..., 1] -def _make_esmpy_field(x_coord, y_coord, ref_name='field', - data=None, mask=None): - """ - Create an ESMPy ESMF.Field on given coordinates. +def _make_esmpy_field(x_coord, y_coord, ref_name="field", data=None, mask=None): + """Create an ESMPy ESMF.Field on given coordinates. Create an ESMF.Grid from the coordinates, defining corners and centre positions as lats+lons. Add a grid mask if provided. Create and return a Field mapped on this Grid, setting data if provided. - Args: - - * x_coord, y_coord (:class:`iris.coords.Coord`): + Parameters + ---------- + x_coord, y_coord : :class:`iris.coords.Coord` One-dimensional coordinates of shape (nx,) and (ny,). Their contiguous bounds define an ESMF.Grid of shape (nx, ny). - - Kwargs: - - * data (:class:`numpy.ndarray`, shape (nx,ny)): + ref_name : str, default="field" + data : :class:`numpy.ndarray`, shape (nx,ny), optional Set the Field data content. - * mask (:class:`numpy.ndarray`, boolean, shape (nx,ny)): + mask : :class:`numpy.ndarray`, bool, shape (nx,ny), optional Add a mask item to the grid, assigning it 0/1 where mask=False/True. """ @@ -82,8 +79,9 @@ def _make_esmpy_field(x_coord, y_coord, ref_name='field', grid = ESMF.Grid(dims) # Get all cell corner coordinates as true-lat-lons - x_bounds, y_bounds = _meshgrid(x_coord.contiguous_bounds(), - y_coord.contiguous_bounds()) + x_bounds, y_bounds = _meshgrid( + x_coord.contiguous_bounds(), y_coord.contiguous_bounds() + ) grid_crs = x_coord.coord_system.as_cartopy_crs() lon_bounds, lat_bounds = _convert_latlons(grid_crs, x_bounds, y_bounds) @@ -98,7 +96,7 @@ def _make_esmpy_field(x_coord, y_coord, ref_name='field', # NOTE: we don't care about Iris' idea of where the points 'really' are # *but* ESMF requires the data in the CENTER for conservative regrid, # according to the documentation : # - http://www.earthsystemmodeling.org/ + # - https://www.earthsystemmodeling.org/ # esmf_releases/public/last/ESMF_refdoc.pdf # - section 22.2.3 : ESMF_REGRIDMETHOD # @@ -126,8 +124,7 @@ def _make_esmpy_field(x_coord, y_coord, ref_name='field', # Add a mask item, if requested if mask is not None: - grid.add_item(ESMF.GridItem.MASK, - [ESMF.StaggerLoc.CENTER]) + grid.add_item(ESMF.GridItem.MASK, [ESMF.StaggerLoc.CENTER]) grid_mask = grid.get_item(ESMF.GridItem.MASK) grid_mask[:] = np.where(mask, 1, 0) @@ -142,38 +139,38 @@ def _make_esmpy_field(x_coord, y_coord, ref_name='field', def regrid_conservative_via_esmpy(source_cube, grid_cube): - """ - Perform a conservative regridding with ESMPy. + """Perform a conservative regridding with ESMPy. Regrids the data of a source cube onto a new grid defined by a destination cube. - Args: - - * source_cube (:class:`iris.cube.Cube`): + Parameters + ---------- + source_cube : :class:`iris.cube.Cube` Source data. Must have two identifiable horizontal dimension coordinates. - * grid_cube (:class:`iris.cube.Cube`): + grid_cube : :class:`iris.cube.Cube` Define the target horizontal grid: Only the horizontal dimension coordinates are actually used. - Returns: + Returns + ------- + :class:`iris.cube.Cube` A new cube derived from source_cube, regridded onto the specified horizontal grid. + Notes + ----- Any additional coordinates which map onto the horizontal dimensions are removed, while all other metadata is retained.
If there are coordinate factories with 2d horizontal reference surfaces, the reference surfaces are also regridded, using ordinary bilinear interpolation. - .. note:: - - Both source and destination cubes must have two dimension coordinates - identified with axes 'X' and 'Y' which share a coord_system with a - Cartopy CRS. - The grids are defined by :meth:`iris.coords.Coord.contiguous_bounds` of - these. + Both source and destination cubes must have two dimension coordinates + identified with axes 'X' and 'Y' which share a coord_system with a + Cartopy CRS. The grids are defined by :meth:`iris.coords.Coord.contiguous_bounds` + of these. .. note:: @@ -182,7 +179,30 @@ def regrid_conservative_via_esmpy(source_cube, grid_cube): To alter this, make a prior call to ESMF.Manager(). + .. deprecated:: 3.2.0 + + This function is scheduled to be removed in a future release. + Please use + `iris-esmf-regrid <https://github.com/SciTools-incubator/iris-esmf-regrid>`_ + instead. + + For example: + + .. code:: + + from esmf_regrid.schemes import ESMFAreaWeighted + result = src_cube.regrid(grid_cube, ESMFAreaWeighted()) + """ + wmsg = ( + "The function " + "'iris.experimental.regrid_conservative." + "regrid_conservative_via_esmpy' " + "has been deprecated, and will be removed in a future release. " + "Please consult the docstring for details." + ) + warn_deprecated(wmsg) + # Lazy import so we can build the docs with no ESMF. import ESMF @@ -192,20 +212,26 @@ def regrid_conservative_via_esmpy(source_cube, grid_cube): src_cs = src_coords[0].coord_system grid_cs = dst_coords[0].coord_system if src_cs is None or grid_cs is None: - raise ValueError("Both 'src' and 'grid' Cubes must have a" - " coordinate system for their rectilinear grid" - " coordinates.") + raise ValueError( + "Both 'src' and 'grid' Cubes must have a" + " coordinate system for their rectilinear grid" + " coordinates." + ) if src_cs.as_cartopy_crs() is None or grid_cs.as_cartopy_crs() is None: - raise ValueError("Both 'src' and 'grid' Cubes coord_systems must have " - "a valid associated Cartopy CRS.") + raise ValueError( + "Both 'src' and 'grid' Cubes coord_systems must have " + "a valid associated Cartopy CRS." + ) def _valid_units(coord): - if isinstance(coord.coord_system, (iris.coord_systems.GeogCS, - iris.coord_systems.RotatedGeogCS)): - valid_units = 'degrees' + if isinstance( + coord.coord_system, + (iris.coord_systems.GeogCS, iris.coord_systems.RotatedGeogCS), + ): + valid_units = "degrees" else: - valid_units = 'm' + valid_units = "m" return coord.units == valid_units if not all(_valid_units(coord) for coord in src_coords + dst_coords): @@ -223,8 +249,9 @@ def _valid_units(coord): fullcube_data = np.ma.zeros(dst_shape) # Iterate 2d slices over all possible indices of the 'other' dimensions - all_other_dims = [i_dim for i_dim in range(source_cube.ndim) - if i_dim not in src_dims_xy] + all_other_dims = [ + i_dim for i_dim in range(source_cube.ndim) if i_dim not in src_dims_xy + ] all_combinations_of_other_inds = np.ndindex(*dst_shape[all_other_dims]) for other_indices in all_combinations_of_other_inds: # Construct a tuple of slices to address the 2d xy field @@ -234,7 +261,7 @@ def _valid_units(coord): # Get the source data, reformed into the right dimension order, (x,y). src_data_2d = source_cube.data[slice_indices_tuple] - if (src_dims_xy[0] > src_dims_xy[1]): + if src_dims_xy[0] > src_dims_xy[1]: src_data_2d = src_data_2d.transpose() # Work out whether we have missing data to define a source grid mask.
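The hunk below regrids a coverage-fraction field alongside the data, then masks every target cell whose fraction of contributing unmasked source cells falls below a tolerance, so partially-covered cells do not acquire spurious conservative averages. A self-contained numpy sketch of that final masking step — the array values and the tolerance here are illustrative stand-ins, not values taken from ESMF::

    import numpy as np

    # Hypothetical regridded values and their matching coverage fractions.
    regridded = np.ma.masked_array([[2.0, 4.0], [6.0, 8.0]])
    coverage = np.array([[1.0, 0.9], [0.2, 1.0]])

    # Mask target cells whose unmasked-source coverage is incomplete.
    coverage_tolerance_threshold = 1.0 - 1e-8
    regridded.mask = coverage < coverage_tolerance_threshold
    print(regridded)  # [[2.0 --], [-- 8.0]]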
@@ -244,20 +271,24 @@ def _valid_units(coord): srcdata_mask = None # Construct ESMF Field objects on source and destination grids. - src_field = _make_esmpy_field(src_coords[0], src_coords[1], - data=src_data_2d, mask=srcdata_mask) + src_field = _make_esmpy_field( + src_coords[0], src_coords[1], data=src_data_2d, mask=srcdata_mask + ) dst_field = _make_esmpy_field(dst_coords[0], dst_coords[1]) # Make Field for destination coverage fraction (for missing data calc). - coverage_field = ESMF.Field(dst_field.grid, 'validmask_dst') + coverage_field = ESMF.Field(dst_field.grid, "validmask_dst") # Do the actual regrid with ESMF. mask_flag_values = np.array([1], dtype=np.int32) - regrid_method = ESMF.Regrid(src_field, dst_field, - src_mask_values=mask_flag_values, - regrid_method=ESMF.RegridMethod.CONSERVE, - unmapped_action=ESMF.UnmappedAction.IGNORE, - dst_frac_field=coverage_field) + regrid_method = ESMF.Regrid( + src_field, + dst_field, + src_mask_values=mask_flag_values, + regrid_method=ESMF.RegridMethod.CONSERVE, + unmapped_action=ESMF.UnmappedAction.IGNORE, + dst_frac_field=coverage_field, + ) regrid_method(src_field, dst_field) data = np.ma.masked_array(dst_field.data) @@ -268,7 +299,7 @@ def _valid_units(coord): data.mask = coverage_field.data < coverage_tolerance_threshold # Transpose ESMF result dims (X,Y) back to the order of the source - if (src_dims_xy[0] > src_dims_xy[1]): + if src_dims_xy[0] > src_dims_xy[1]: data = data.transpose() # Paste regridded slice back into parent array @@ -283,19 +314,28 @@ def _valid_units(coord): # TODO: can this not also be wound into the _create_cube method ? src_cs = src_coords[0].coord_system sample_grid_x, sample_grid_y = RectilinearRegridder._sample_grid( - src_cs, dst_coords[0], dst_coords[1]) + src_cs, dst_coords[0], dst_coords[1] + ) # Return result as a new cube based on the source. # TODO: please tidy this interface !!! - return RectilinearRegridder._create_cube( - fullcube_data, - src=source_cube, - x_dim=src_dims_xy[0], - y_dim=src_dims_xy[1], + _regrid_callback = functools.partial( + RectilinearRegridder._regrid, src_x_coord=src_coords[0], src_y_coord=src_coords[1], - grid_x_coord=dst_coords[0], - grid_y_coord=dst_coords[1], sample_grid_x=sample_grid_x, sample_grid_y=sample_grid_y, - regrid_callback=RectilinearRegridder._regrid) + ) + + def regrid_callback(*args, **kwargs): + _data, dims = args + return _regrid_callback(_data, *dims, **kwargs) + + return _create_cube( + fullcube_data, + source_cube, + [src_dims_xy[0], src_dims_xy[1]], + [dst_coords[0], dst_coords[1]], + 2, + regrid_callback, + ) diff --git a/lib/iris/experimental/representation.py b/lib/iris/experimental/representation.py index de0bfb229e..0648cc8e0d 100644 --- a/lib/iris/experimental/representation.py +++ b/lib/iris/experimental/representation.py @@ -1,34 +1,16 @@ -# (C) British Crown Copyright 2018 - 2019, Met Office +# Copyright Iris contributors # -# This file is part of Iris. -# -# Iris is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the -# Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Iris is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with Iris. 
If not, see <http://www.gnu.org/licenses/>. - -""" -Definitions of how Iris objects should be represented. - -""" +# This file is part of Iris and is released under the BSD license. +# See LICENSE in the root of the repository for full licensing details. -from __future__ import (absolute_import, division, print_function) -from six.moves import (filter, input, map, range, zip) # noqa +"""Definitions of how Iris objects should be represented.""" +from html import escape import re -class CubeRepresentation(object): - """ - Produce representations of a :class:`~iris.cube.Cube`. +class CubeRepresentation: + """Produce representations of a :class:`~iris.cube.Cube`. This includes: @@ -96,31 +78,46 @@ class CubeRepresentation: def __init__(self, cube): self.cube = cube self.cube_id = id(self.cube) - self.cube_str = str(self.cube) - - self.str_headings = { - 'Dimension coordinates:': None, - 'Auxiliary coordinates:': None, - 'Derived coordinates:': None, - 'Scalar coordinates:': None, - 'Attributes:': None, - 'Cell methods:': None, + self.cube_str = escape(str(self.cube)) + + # Define the expected vector and scalar sections in output, in expected + # order of appearance. + # NOTE: if we recoded this to use a CubeSummary, these section titles + # would be available from that. + self.vector_section_names = [ + "Dimension coordinates:", + "Mesh coordinates:", + "Auxiliary coordinates:", + "Derived coordinates:", + "Cell measures:", + "Ancillary variables:", + ] + self.scalar_section_names = [ + "Mesh:", + "Scalar coordinates:", + "Scalar cell measures:", + "Cell methods:", + "Attributes:", + ] + self.sections_data = { + name: None for name in self.vector_section_names + self.scalar_section_names } - self.dim_desc_coords = ['Dimension coordinates:', - 'Auxiliary coordinates:', - 'Derived coordinates:'] + # 'Scalar-cell-measures' is currently alone amongst the scalar sections, + # in displaying only a 'name' and no 'value' field. + self.single_cell_section_names = ["Scalar cell measures:"] # Important content that summarises a cube is defined here. self.shapes = self.cube.shape self.scalar_cube = self.shapes == () self.ndims = self.cube.ndim - self.name = self.cube.name().title().replace('_', ' ') - self.names = self._dim_names() - self.units = self.cube.units + self.name = escape(self.cube.name().title().replace("_", " ")) + self.names = [escape(dim_name) for dim_name in self._dim_names()] + self.units = escape(str(self.cube.units)) def _get_dim_names(self): - """ + """Get dimension-describing coordinate names. + Get dimension-describing coordinate names, or '--' if no coordinate describes the dimension. @@ -133,36 +130,36 @@ def _get_dim_names(self): # Add the dim_coord names that participate in the associated data # dimensions. for dim in range(len(self.cube.shape)): - dim_coords = self.cube.coords(contains_dimension=dim, - dim_coords=True) + dim_coords = self.cube.coords(contains_dimension=dim, dim_coords=True) if dim_coords: dim_names[dim] = dim_coords[0].name() else: - dim_names[dim] = '--' + dim_names[dim] = "--" return dim_names def _dim_names(self): if self.scalar_cube: - dim_names = ['(scalar cube)'] + dim_names = ["(scalar cube)"] else: dim_names = self._get_dim_names() return dim_names def _get_lines(self): - return self.cube_str.split('\n') + return self.cube_str.split("\n") def _get_bits(self, bits): - """ + """Parse the body content (`bits`) of the cube string. + Parse the body content (`bits`) of the cube string in preparation for being converted into table rows.
""" - left_indent = re.split(r'\w+', bits[1])[0] + left_indent = re.split(r"\w+", bits[1])[0] # Get heading indices within the printout. start_inds = [] - for hdg in self.str_headings.keys(): - heading = '{}{}'.format(left_indent, hdg) + for hdg in self.sections_data.keys(): + heading = "{}{}".format(left_indent, hdg) try: start_ind = bits.index(heading) except ValueError: @@ -176,137 +173,134 @@ def _get_bits(self, bits): for i0, i1 in zip(start_inds[:-1], start_inds[1:]): str_heading_name = bits[i0].strip() if i1 != 0: - content = bits[i0 + 1: i1] + content = bits[i0 + 1 : i1] else: - content = bits[i0 + 1:] - self.str_headings[str_heading_name] = content + content = bits[i0 + 1 :] + self.sections_data[str_heading_name] = content def _make_header(self): - """ + """Make the table header. + Make the table header. This is similar to the summary of the cube, but does not include dim shapes. These are included on the next table row down, and produced with `make_shapes_row`. """ # Header row. - tlc_template = \ - '{self.name} ({self.units})' + tlc_template = '{self.name} ({self.units})' top_left_cell = tlc_template.format(self=self) cells = ['', top_left_cell] for dim_name in self.names: - cells.append( - '{}'.format(dim_name)) - cells.append('') - return '\n'.join(cell for cell in cells) + cells.append('{}'.format(dim_name)) + cells.append("") + return "\n".join(cell for cell in cells) def _make_shapes_row(self): """Add a row to show data / dimensions shape.""" - title_cell = \ - 'Shape' + title_cell = 'Shape' cells = ['', title_cell] for shape in self.shapes: - cells.append( - '{}'.format(shape)) - cells.append('') - return '\n'.join(cell for cell in cells) + cells.append('{}'.format(shape)) + cells.append("") + return "\n".join(cell for cell in cells) def _make_row(self, title, body=None, col_span=0): - """ - Produce one row for the table body; i.e. - Coord namex-... - - `body` contains the content for each cell not in the left-most (title) - column. - If None, indicates this row is a title row (see below). - `title` contains the row heading. If `body` is None, indicates - that the row contains a sub-heading; - e.g. 'Dimension coordinates:'. - `col_span` indicates how many columns the string should span. + """Produce one row for the table body. + + Parameters + ---------- + title : str, optional + Contains the row heading. If `body` is None, indicates + that the row contains a sub-heading; + e.g. 'Dimension coordinates:'. + body : str, optional + Contains the content for each cell not in the left-most (title) column. + If None, indicates this row is a title row (see below). + col_span : int, default=0 + Indicates how many columns the string should span. + + Examples + -------- + :: + + Coord namex-.... """ row = [''] - template = ' {content}' + template = " {content}" if body is None: # This is a title row. # Strip off the trailing ':' from the title string. title = title.strip()[:-1] row.append( - template.format(html_cls=' class="iris-title iris-word-cell"', - content=title)) + template.format( + html_cls=' class="iris-title iris-word-cell"', + content=title, + ) + ) # Add blank cells for the rest of the rows. for _ in range(self.ndims): - row.append(template.format(html_cls=' class="iris-title"', - content='')) + row.append(template.format(html_cls=' class="iris-title"', content="")) else: # This is not a title row. # Deal with name of coord/attr etc. first. 
- sub_title = '\t{}'.format(title) - row.append(template.format( - html_cls=' class="iris-word-cell iris-subheading-cell"', - content=sub_title)) + sub_title = "\t{}".format(title) + row.append( + template.format( + html_cls=' class="iris-word-cell iris-subheading-cell"', + content=sub_title, + ) + ) # One further item or more than that? if col_span != 0: - html_cls = ' class="{}" colspan="{}"'.format('iris-word-cell', - col_span) + html_cls = ' class="{}" colspan="{}"'.format("iris-word-cell", col_span) row.append(template.format(html_cls=html_cls, content=body)) else: # "Inclusion" - `x` or `-`. for itm in body: - row.append(template.format( - html_cls=' class="iris-inclusion-cell"', - content=itm)) - row.append('') + row.append( + template.format( + html_cls=' class="iris-inclusion-cell"', + content=itm, + ) + ) + row.append("") return row - def _expand_last_cell(self, element, body): - '''Expand an element containing a cell by adding a new line.''' - split_point = element.index('') - element = element[:split_point] + '
    ' + body + element[split_point:] - return element - def _make_content(self): elements = [] - for k, v in self.str_headings.items(): + for k, v in self.sections_data.items(): if v is not None: # Add the sub-heading title. elements.extend(self._make_row(k)) for line in v: # Add every other row in the sub-heading. - if k in self.dim_desc_coords: - body = re.findall(r'[\w-]+', line) + if k in self.vector_section_names: + body = re.findall(r"[\w-]+", line) title = body.pop(0) colspan = 0 else: - try: - split_point = line.index(':') - except ValueError: - # When a line exists in v without a ':', we expect - # that this is due to the value of some attribute - # containing multiple lines. We collect all these - # lines in the same cell. - body = line.strip() - # We choose the element containing the last cell - # in the last row. - element = elements[-2] - element = self._expand_last_cell(element, body) - elements[-2] = element - continue + colspan = self.ndims + if k in self.single_cell_section_names: + title = line.strip() + body = "" else: + line = line.strip() + split_point = line.index(" ") title = line[:split_point].strip() - body = line[split_point + 2:].strip() - colspan = self.ndims - elements.extend( - self._make_row(title, body=body, col_span=colspan)) - return '\n'.join(element for element in elements) + body = line[split_point + 2 :].strip() + + elements.extend(self._make_row(title, body=body, col_span=colspan)) + return "\n".join(element for element in elements) def repr_html(self): - """The `repr` interface for Jupyter.""" + """Represent html, the `repr` interface for Jupyter.""" # Deal with the header first. header = self._make_header() # Check if we have a scalar cube. if self.scalar_cube: - shape = '' + shape = "" # We still need a single content column! self.ndims = 1 else: @@ -317,18 +311,17 @@ def repr_html(self): # If we only have a single line `cube_str` we have no coords / attrs! # We need to handle this case specially. if len(lines) == 1: - content = '' + content = "" else: self._get_bits(lines) content = self._make_content() - return self._template.format(header=header, - id=self.cube_id, - shape=shape, - content=content) + return self._template.format( + header=header, id=self.cube_id, shape=shape, content=content + ) -class CubeListRepresentation(object): +class CubeListRepresentation: _template = """
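The repr_html method above supplies the HTML that Jupyter ultimately renders: the cube's own _repr_html_ hook delegates to CubeRepresentation, and the html.escape calls introduced in this diff keep cube metadata (names, units, attribute values) from being parsed as markup. A minimal, self-contained sketch of the same rich-display protocol, using a hypothetical class rather than Iris code::

    from html import escape

    class MiniReport:
        """Hypothetical object that Jupyter renders as an HTML table."""

        def __init__(self, name, shape):
            # Escape free-text fields, mirroring CubeRepresentation above.
            self.name = escape(name)
            self.shape = shape

        def _repr_html_(self):
            cells = "".join("<td>{}</td>".format(n) for n in self.shape)
            return "<table><tr><th>{}</th>{}</tr></table>".format(self.name, cells)

    # A notebook displays the last expression of a cell via _repr_html_();
    # the '<' in the name below renders as literal text, not as a tag.
    print(MiniReport("air_temperature / <unknown>", (240, 37, 49))._repr_html_())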